From 2a463016a9fcd086247f62f504d91bd87800af56 Mon Sep 17 00:00:00 2001 From: Wenkai Yin Date: Sun, 28 Apr 2019 12:00:27 +0800 Subject: [PATCH] Upgrade the distribution and notary library (#7516) * Return 404 when the log of task doesn't exist Return 404 when the log of task doesn't exist Signed-off-by: Wenkai Yin * Upgrade the distribution and notary library Upgrade the distribution library to 2.7.1, the notary library to 0.6.1 Signed-off-by: Wenkai Yin --- src/Gopkg.lock | 97 +++-- src/Gopkg.toml | 8 +- src/common/utils/notary/helper.go | 12 +- src/common/utils/registry/auth/path.go | 5 +- src/common/utils/registry/auth/path_test.go | 4 +- .../utils/registry/auth/tokenauthorizer.go | 4 +- src/core/api/replication_execution.go | 7 + src/jobservice/job/impl/scan/clair_job.go | 2 +- src/replication/operation/controller.go | 15 - src/replication/operation/controller_test.go | 25 -- .../github.com/Sirupsen/logrus/.travis.yml | 15 - .../github.com/Sirupsen/logrus/CHANGELOG.md | 123 ------ .../github.com/Sirupsen/logrus/entry.go | 288 ------------- .../github.com/Sirupsen/logrus/formatter.go | 45 -- .../Sirupsen/logrus/json_formatter.go | 79 ---- .../Sirupsen/logrus/terminal_bsd.go | 10 - .../Sirupsen/logrus/terminal_linux.go | 14 - .../Sirupsen/logrus/text_formatter.go | 178 -------- .../github.com/docker/distribution/.gitignore | 1 + .../docker/distribution/.gometalinter.json | 16 + .../github.com/docker/distribution/.mailmap | 25 +- .../docker/distribution/.travis.yml | 51 +++ .../github.com/docker/distribution/AUTHORS | 182 -------- .../docker/distribution/BUILDING.md | 12 +- .../docker/distribution/CHANGELOG.md | 114 ----- .../docker/distribution/CONTRIBUTING.md | 14 +- .../github.com/docker/distribution/Dockerfile | 17 +- .../docker/distribution/MAINTAINERS | 221 +++++++++- .../github.com/docker/distribution/Makefile | 153 ++++--- .../github.com/docker/distribution/README.md | 3 +- .../docker/distribution/RELEASE-CHECKLIST.md | 36 -- .../github.com/docker/distribution/blobs.go | 16 +- .../github.com/docker/distribution/circle.yml | 93 ---- .../docker/distribution/context/context.go | 18 +- .../docker/distribution/context/doc.go | 7 +- .../docker/distribution/context/http.go | 55 +-- .../docker/distribution/context/logger.go | 23 +- .../docker/distribution/context/trace.go | 5 +- .../docker/distribution/context/util.go | 5 +- .../docker/distribution/context/version.go | 16 +- .../docker/distribution/coverpkg.sh | 7 - .../docker/distribution/digest/digest.go | 139 ------ .../docker/distribution/digest/digester.go | 155 ------- .../docker/distribution/digest/doc.go | 42 -- .../docker/distribution/digest/verifiers.go | 44 -- .../distribution/{digest => digestset}/set.go | 30 +- .../github.com/docker/distribution/errors.go | 8 +- .../docker/distribution/health/doc.go | 8 +- .../docker/distribution/health/health.go | 4 +- .../manifest/manifestlist/manifestlist.go | 216 ++++++++++ .../manifest/schema1/config_builder.go | 13 +- .../distribution/manifest/schema1/manifest.go | 4 +- .../manifest/schema1/reference_builder.go | 6 +- .../distribution/manifest/schema1/verify.go | 2 +- .../distribution/manifest/schema2/builder.go | 35 +- .../distribution/manifest/schema2/manifest.go | 20 +- .../docker/distribution/manifests.go | 24 +- .../docker/distribution/reference/helpers.go | 42 ++ .../distribution/reference/normalize.go | 170 ++++++++ .../distribution/reference/reference.go | 211 +++++---- .../docker/distribution/reference/regexp.go | 41 +- .../docker/distribution/registry.go | 25 +- 
.../registry/api/errcode/handler.go | 6 +- .../docker/distribution/registry/auth/auth.go | 7 +- .../registry/auth/token/accesscontroller.go | 67 ++- .../distribution/registry/auth/token/token.go | 2 +- .../client/auth/challenge/authchallenge.go | 8 +- .../github.com/docker/distribution/tags.go | 2 +- .../docker/distribution/vendor.conf | 51 +++ .../github.com/docker/notary/NOTARY_VERSION | 1 - .../docker/notary/server.minimal.Dockerfile | 19 - .../docker/notary/signer.minimal.Dockerfile | 20 - .../github.com/docker/notary/vendor.conf | 30 -- .../go-windows-terminal-sequences/LICENSE | 9 + .../go-windows-terminal-sequences/README.md | 41 ++ .../go-windows-terminal-sequences/go.mod | 1 + .../sequences.go | 36 ++ .../sequences_dummy.go | 11 + .../opencontainers/image-spec/LICENSE | 191 ++++++++ .../image-spec/specs-go/v1/annotations.go | 56 +++ .../image-spec/specs-go/v1/config.go | 103 +++++ .../image-spec/specs-go/v1/descriptor.go | 64 +++ .../image-spec/specs-go/v1/index.go | 29 ++ .../image-spec/specs-go/v1/layout.go | 28 ++ .../image-spec/specs-go/v1/manifest.go | 32 ++ .../image-spec/specs-go/v1/mediatype.go | 48 +++ .../image-spec/specs-go/version.go | 32 ++ .../image-spec/specs-go/versioned.go | 23 + .../{Sirupsen => sirupsen}/logrus/.gitignore | 1 + .../github.com/sirupsen/logrus/.travis.yml | 21 + .../github.com/sirupsen/logrus/CHANGELOG.md | 198 +++++++++ .../{Sirupsen => sirupsen}/logrus/LICENSE | 0 .../{Sirupsen => sirupsen}/logrus/README.md | 98 ++--- .../{Sirupsen => sirupsen}/logrus/alt_exit.go | 18 +- .../logrus/appveyor.yml | 0 .../{Sirupsen => sirupsen}/logrus/doc.go | 0 .../github.com/sirupsen/logrus/entry.go | 407 ++++++++++++++++++ .../{Sirupsen => sirupsen}/logrus/exported.go | 66 ++- .../github.com/sirupsen/logrus/formatter.go | 78 ++++ src/vendor/github.com/sirupsen/logrus/go.mod | 10 + src/vendor/github.com/sirupsen/logrus/go.sum | 13 + .../{Sirupsen => sirupsen}/logrus/hooks.go | 0 .../sirupsen/logrus/json_formatter.go | 121 ++++++ .../{Sirupsen => sirupsen}/logrus/logger.go | 240 ++++++----- .../{Sirupsen => sirupsen}/logrus/logrus.go | 75 +++- .../logrus/terminal_check_appengine.go | 2 +- .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 11 + .../logrus/terminal_check_notappengine.go | 6 +- .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 20 + .../sirupsen/logrus/terminal_notwindows.go | 8 + .../sirupsen/logrus/terminal_windows.go | 18 + .../sirupsen/logrus/text_formatter.go | 299 +++++++++++++ .../{Sirupsen => sirupsen}/logrus/writer.go | 2 + .../notary/.gitignore | 0 .../notary/CHANGELOG.md | 50 +++ .../notary/CONTRIBUTING.md | 4 +- .../notary/CONTRIBUTORS | 0 .../notary/Dockerfile | 7 +- .../notary/Jenkinsfile | 3 +- .../notary/LICENSE | 2 +- .../notary/MAINTAINERS | 10 +- .../notary/MAINTAINERS.ALUMNI | 22 + .../notary/MAINTAINERS_RULES.md | 39 ++ .../notary/Makefile | 13 +- .../theupdateframework/notary/NOTARY_VERSION | 1 + .../notary/README.md | 74 ++-- .../notary/ROADMAP.md | 0 .../notary/circle.yml | 0 .../notary/client/changelist/change.go | 6 +- .../notary/client/changelist/changelist.go | 0 .../client/changelist/file_changelist.go | 4 +- .../notary/client/changelist/interface.go | 2 +- .../notary/client/client.go | 328 +++++++++----- .../notary/client/delegations.go | 30 +- .../notary/client/errors.go | 3 +- .../notary/client/helpers.go | 48 ++- .../notary/client/interface.go | 47 ++ .../notary/client/repo.go | 4 +- .../notary/client/repo_pkcs11.go | 6 +- 
.../notary/client/tufclient.go | 46 +- .../notary/client/witness.go | 8 +- .../notary/codecov.yml | 0 .../notary/const.go | 0 .../notary/const_nowindows.go | 0 .../notary/const_windows.go | 0 .../notary/cross.Dockerfile | 7 +- .../notary/cryptoservice/certificate.go | 4 +- .../notary/cryptoservice/crypto_service.go | 64 +-- .../notary/development.mysql.yml | 2 +- .../notary/development.postgresql.yml | 5 +- .../notary/development.rethink.yml | 0 .../notary/docker-compose.postgresql.yml | 5 +- .../notary/docker-compose.rethink.yml | 0 .../notary/docker-compose.yml | 2 +- .../notary/escrow.Dockerfile | 5 +- .../theupdateframework/notary/fips.go | 14 + .../notary/notary.go | 0 .../notary/server.Dockerfile | 15 +- .../notary/server.minimal.Dockerfile | 37 ++ .../notary/signer.Dockerfile | 15 +- .../notary/signer.minimal.Dockerfile | 39 ++ .../notary/storage/errors.go | 0 .../notary/storage/filestore.go | 11 +- .../notary/storage/httpstore.go | 30 +- .../notary/storage/interfaces.go | 2 +- .../notary/storage/memorystore.go | 6 +- .../notary/storage/offlinestore.go | 2 +- .../notary/trustmanager/errors.go | 0 .../notary/trustmanager/importLogic.md | 8 + .../notary/trustmanager/interfaces.go | 2 +- .../notary/trustmanager/keys.go | 246 +++++++++++ .../notary/trustmanager/keystore.go | 33 +- .../notary/trustmanager/yubikey/import.go | 9 +- .../notary/trustmanager/yubikey/non_pkcs11.go | 0 .../trustmanager/yubikey/pkcs11_darwin.go | 0 .../trustmanager/yubikey/pkcs11_interface.go | 0 .../trustmanager/yubikey/pkcs11_linux.go | 2 + .../trustmanager/yubikey/yubikeystore.go | 12 +- .../notary/trustpinning/ca.crt | 0 .../notary/trustpinning/certs.go | 8 +- .../notary/trustpinning/test.crt | 0 .../notary/trustpinning/trustpin.go | 50 ++- .../notary/tuf/LICENSE | 0 .../notary/tuf/README.md | 0 .../notary/tuf/builder.go | 10 +- .../notary/tuf/data/errors.go | 0 .../notary/tuf/data/keys.go | 2 +- .../notary/tuf/data/roles.go | 2 +- .../notary/tuf/data/root.go | 0 .../notary/tuf/data/serializer.go | 0 .../notary/tuf/data/snapshot.go | 4 +- .../notary/tuf/data/targets.go | 0 .../notary/tuf/data/timestamp.go | 2 +- .../notary/tuf/data/types.go | 8 +- .../notary/tuf/signed/ed25519.go | 6 +- .../notary/tuf/signed/errors.go | 2 +- .../notary/tuf/signed/interface.go | 2 +- .../notary/tuf/signed/sign.go | 8 +- .../notary/tuf/signed/verifiers.go | 4 +- .../notary/tuf/signed/verify.go | 18 +- .../notary/tuf/tuf.go | 21 +- .../notary/tuf/utils/pkcs8.go | 341 +++++++++++++++ .../notary/tuf/utils/role_sort.go | 0 .../notary/tuf/utils/stack.go | 0 .../notary/tuf/utils/utils.go | 2 +- .../notary/tuf/utils/x509.go | 175 ++++---- .../notary/tuf/validation/errors.go | 0 .../theupdateframework/notary/vendor.conf | 59 +++ 210 files changed, 5182 insertions(+), 2791 deletions(-) delete mode 100644 src/vendor/github.com/Sirupsen/logrus/.travis.yml delete mode 100644 src/vendor/github.com/Sirupsen/logrus/CHANGELOG.md delete mode 100644 src/vendor/github.com/Sirupsen/logrus/entry.go delete mode 100644 src/vendor/github.com/Sirupsen/logrus/formatter.go delete mode 100644 src/vendor/github.com/Sirupsen/logrus/json_formatter.go delete mode 100644 src/vendor/github.com/Sirupsen/logrus/terminal_bsd.go delete mode 100644 src/vendor/github.com/Sirupsen/logrus/terminal_linux.go delete mode 100644 src/vendor/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 src/vendor/github.com/docker/distribution/.gometalinter.json create mode 100644 src/vendor/github.com/docker/distribution/.travis.yml delete mode 100644 
src/vendor/github.com/docker/distribution/AUTHORS delete mode 100644 src/vendor/github.com/docker/distribution/CHANGELOG.md delete mode 100644 src/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md delete mode 100644 src/vendor/github.com/docker/distribution/circle.yml delete mode 100755 src/vendor/github.com/docker/distribution/coverpkg.sh delete mode 100644 src/vendor/github.com/docker/distribution/digest/digest.go delete mode 100644 src/vendor/github.com/docker/distribution/digest/digester.go delete mode 100644 src/vendor/github.com/docker/distribution/digest/doc.go delete mode 100644 src/vendor/github.com/docker/distribution/digest/verifiers.go rename src/vendor/github.com/docker/distribution/{digest => digestset}/set.go (90%) create mode 100644 src/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go create mode 100644 src/vendor/github.com/docker/distribution/reference/helpers.go create mode 100644 src/vendor/github.com/docker/distribution/reference/normalize.go create mode 100644 src/vendor/github.com/docker/distribution/vendor.conf delete mode 100644 src/vendor/github.com/docker/notary/NOTARY_VERSION delete mode 100644 src/vendor/github.com/docker/notary/server.minimal.Dockerfile delete mode 100644 src/vendor/github.com/docker/notary/signer.minimal.Dockerfile delete mode 100644 src/vendor/github.com/docker/notary/vendor.conf create mode 100644 src/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE create mode 100644 src/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md create mode 100644 src/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod create mode 100644 src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go create mode 100644 src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/.gitignore (50%) create mode 100644 src/vendor/github.com/sirupsen/logrus/.travis.yml create mode 100644 src/vendor/github.com/sirupsen/logrus/CHANGELOG.md rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/LICENSE (100%) rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/README.md (71%) rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/alt_exit.go (74%) rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/appveyor.yml (100%) rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/doc.go (100%) create mode 100644 src/vendor/github.com/sirupsen/logrus/entry.go rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/exported.go (73%) create mode 100644 src/vendor/github.com/sirupsen/logrus/formatter.go create mode 
100644 src/vendor/github.com/sirupsen/logrus/go.mod create mode 100644 src/vendor/github.com/sirupsen/logrus/go.sum rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks.go (100%) create mode 100644 src/vendor/github.com/sirupsen/logrus/json_formatter.go rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/logger.go (59%) rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/logrus.go (72%) rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_check_appengine.go (75%) create mode 100644 src/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 src/vendor/github.com/sirupsen/logrus/terminal_check_js.go rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_check_notappengine.go (58%) create mode 100644 src/vendor/github.com/sirupsen/logrus/terminal_check_unix.go create mode 100644 src/vendor/github.com/sirupsen/logrus/terminal_check_windows.go create mode 100644 src/vendor/github.com/sirupsen/logrus/terminal_notwindows.go create mode 100644 src/vendor/github.com/sirupsen/logrus/terminal_windows.go create mode 100644 src/vendor/github.com/sirupsen/logrus/text_formatter.go rename src/vendor/github.com/{Sirupsen => sirupsen}/logrus/writer.go (96%) rename src/vendor/github.com/{docker => theupdateframework}/notary/.gitignore (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/CHANGELOG.md (51%) rename src/vendor/github.com/{docker => theupdateframework}/notary/CONTRIBUTING.md (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/CONTRIBUTORS (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/Dockerfile (74%) rename src/vendor/github.com/{docker => theupdateframework}/notary/Jenkinsfile (55%) rename src/vendor/github.com/{docker => theupdateframework}/notary/LICENSE (99%) rename src/vendor/github.com/{docker => theupdateframework}/notary/MAINTAINERS (87%) create mode 100644 src/vendor/github.com/theupdateframework/notary/MAINTAINERS.ALUMNI create mode 100644 src/vendor/github.com/theupdateframework/notary/MAINTAINERS_RULES.md rename src/vendor/github.com/{docker => theupdateframework}/notary/Makefile (95%) create mode 100644 src/vendor/github.com/theupdateframework/notary/NOTARY_VERSION rename src/vendor/github.com/{docker => theupdateframework}/notary/README.md (55%) rename src/vendor/github.com/{docker => theupdateframework}/notary/ROADMAP.md (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/circle.yml (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/changelist/change.go (94%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/changelist/changelist.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/changelist/file_changelist.go (99%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/changelist/interface.go (97%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/client.go (76%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/delegations.go (87%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/errors.go (96%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/helpers.go (84%) create mode 100644 src/vendor/github.com/theupdateframework/notary/client/interface.go rename src/vendor/github.com/{docker => theupdateframework}/notary/client/repo.go (80%) rename src/vendor/github.com/{docker => 
theupdateframework}/notary/client/repo_pkcs11.go (78%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/tufclient.go (89%) rename src/vendor/github.com/{docker => theupdateframework}/notary/client/witness.go (88%) rename src/vendor/github.com/{docker => theupdateframework}/notary/codecov.yml (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/const.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/const_nowindows.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/const_windows.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/cross.Dockerfile (89%) rename src/vendor/github.com/{docker => theupdateframework}/notary/cryptoservice/certificate.go (92%) rename src/vendor/github.com/{docker => theupdateframework}/notary/cryptoservice/crypto_service.go (75%) rename src/vendor/github.com/{docker => theupdateframework}/notary/development.mysql.yml (97%) rename src/vendor/github.com/{docker => theupdateframework}/notary/development.postgresql.yml (71%) rename src/vendor/github.com/{docker => theupdateframework}/notary/development.rethink.yml (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/docker-compose.postgresql.yml (69%) rename src/vendor/github.com/{docker => theupdateframework}/notary/docker-compose.rethink.yml (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/docker-compose.yml (97%) rename src/vendor/github.com/{docker => theupdateframework}/notary/escrow.Dockerfile (67%) create mode 100644 src/vendor/github.com/theupdateframework/notary/fips.go rename src/vendor/github.com/{docker => theupdateframework}/notary/notary.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/server.Dockerfile (53%) create mode 100644 src/vendor/github.com/theupdateframework/notary/server.minimal.Dockerfile rename src/vendor/github.com/{docker => theupdateframework}/notary/signer.Dockerfile (57%) create mode 100644 src/vendor/github.com/theupdateframework/notary/signer.minimal.Dockerfile rename src/vendor/github.com/{docker => theupdateframework}/notary/storage/errors.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/storage/filestore.go (97%) rename src/vendor/github.com/{docker => theupdateframework}/notary/storage/httpstore.go (92%) rename src/vendor/github.com/{docker => theupdateframework}/notary/storage/interfaces.go (95%) rename src/vendor/github.com/{docker => theupdateframework}/notary/storage/memorystore.go (96%) rename src/vendor/github.com/{docker => theupdateframework}/notary/storage/offlinestore.go (96%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/errors.go (100%) create mode 100644 src/vendor/github.com/theupdateframework/notary/trustmanager/importLogic.md rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/interfaces.go (97%) create mode 100644 src/vendor/github.com/theupdateframework/notary/trustmanager/keys.go rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/keystore.go (89%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/yubikey/import.go (87%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/yubikey/non_pkcs11.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/yubikey/pkcs11_darwin.go (100%) rename src/vendor/github.com/{docker => 
theupdateframework}/notary/trustmanager/yubikey/pkcs11_interface.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/yubikey/pkcs11_linux.go (58%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustmanager/yubikey/yubikeystore.go (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustpinning/ca.crt (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustpinning/certs.go (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustpinning/test.crt (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/trustpinning/trustpin.go (71%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/LICENSE (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/README.md (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/builder.go (99%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/errors.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/keys.go (99%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/roles.go (99%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/root.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/serializer.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/snapshot.go (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/targets.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/timestamp.go (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/data/types.go (97%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/signed/ed25519.go (94%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/signed/errors.go (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/signed/interface.go (96%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/signed/sign.go (95%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/signed/verifiers.go (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/signed/verify.go (82%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/tuf.go (98%) create mode 100644 src/vendor/github.com/theupdateframework/notary/tuf/utils/pkcs8.go rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/utils/role_sort.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/utils/stack.go (100%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/utils/utils.go (98%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/utils/x509.go (82%) rename src/vendor/github.com/{docker => theupdateframework}/notary/tuf/validation/errors.go (100%) create mode 100644 src/vendor/github.com/theupdateframework/notary/vendor.conf diff --git a/src/Gopkg.lock b/src/Gopkg.lock index 0c9c66ff5..815277ae6 100644 --- a/src/Gopkg.lock +++ b/src/Gopkg.lock @@ -25,14 +25,6 @@ revision = "c7af12943936e8c39859482e61f0574c2fd7fc75" version = "v1.4.2" -[[projects]] - digest = "1:9e9193aa51197513b3abcb108970d831fbcf40ef96aa845c4f03276e1fa316d2" - name = "github.com/Sirupsen/logrus" - packages = ["."] - pruneopts = "UT" - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" - [[projects]] digest = 
"1:e8078e5f9d84e87745efb3c0961e78045500cda10d7102fdf839fbac4b49a423" name = "github.com/Unknwon/goconfig" @@ -141,14 +133,15 @@ version = "v3.0.0" [[projects]] - digest = "1:5a39bab16f84dd753a3af60076a915b55584cc6df3b3dfba53bfd48bf4420e77" + digest = "1:d06c54bbda3a04ec18a2fa0577896b3c40f13409639b442379ee0a5a53be8259" name = "github.com/docker/distribution" packages = [ ".", "context", - "digest", + "digestset", "health", "manifest", + "manifest/manifestlist", "manifest/schema1", "manifest/schema2", "reference", @@ -159,8 +152,8 @@ "uuid", ] pruneopts = "UT" - revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" - version = "v2.6.2" + revision = "2461543d988979529609e8cb6fca9ca190dc48da" + version = "v2.7.1" [[projects]] branch = "master" @@ -178,27 +171,6 @@ pruneopts = "UT" revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20" -[[projects]] - digest = "1:fea1a444386b05e00dfcf2cb9a95fb09e11f1689056161471229baa4a7a65459" - name = "github.com/docker/notary" - packages = [ - ".", - "client", - "client/changelist", - "cryptoservice", - "storage", - "trustmanager", - "trustmanager/yubikey", - "trustpinning", - "tuf", - "tuf/data", - "tuf/signed", - "tuf/utils", - "tuf/validation", - ] - pruneopts = "UT" - revision = "c04e3e6d05881045def11167c51d4a8baa34899a" - [[projects]] digest = "1:0594af97b2f4cec6554086eeace6597e20a4b69466eb4ada25adf9f4300dddd2" name = "github.com/garyburd/redigo" @@ -354,6 +326,14 @@ revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29" version = "v1.1.6" +[[projects]] + digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "UT" + revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" + version = "v1.0.2" + [[projects]] branch = "master" digest = "1:bd26bbaf1e9f9dfe829a88f87a0849b56f717c31785443a67668f2c752fa8412" @@ -396,6 +376,17 @@ revision = "aa2ec055abd10d26d539eb630a92241b781ce4bc" version = "v1.0.0-rc0" +[[projects]] + digest = "1:11db38d694c130c800d0aefb502fb02519e514dc53d9804ce51d1ad25ec27db6" + name = "github.com/opencontainers/image-spec" + packages = [ + "specs-go", + "specs-go/v1", + ] + pruneopts = "UT" + revision = "d60099175f88c47cd379c4738d158884749ed235" + version = "v1.0.1" + [[projects]] digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" name = "github.com/pkg/errors" @@ -431,6 +422,14 @@ revision = "b024fc5ea0e34bc3f83d9941c8d60b0622bfaca4" version = "v1" +[[projects]] + digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "UT" + revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f" + version = "v1.4.1" + [[projects]] digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7" name = "github.com/spf13/pflag" @@ -460,6 +459,28 @@ revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" version = "v1.3.0" +[[projects]] + digest = "1:a5702d6fd0891671faf050c05451d3ee4cfd70cb958e11556fefaca628ce832e" + name = "github.com/theupdateframework/notary" + packages = [ + ".", + "client", + "client/changelist", + "cryptoservice", + "storage", + "trustmanager", + "trustmanager/yubikey", + "trustpinning", + "tuf", + "tuf/data", + "tuf/signed", + "tuf/utils", + "tuf/validation", + ] + pruneopts = "UT" + revision = "d6e1431feb32348e0650bf7551ac5cffd01d857b" + version = "v0.6.1" + [[projects]] digest = "1:ab3259b9f5008a18ff8c1cc34623eccce354f3a9faf5b409983cd6717d64b40b" name = "golang.org/x/crypto" @@ -731,18 +752,14 
@@ "github.com/dghubble/sling", "github.com/dgrijalva/jwt-go", "github.com/docker/distribution", - "github.com/docker/distribution/digest", "github.com/docker/distribution/health", + "github.com/docker/distribution/manifest/manifestlist", "github.com/docker/distribution/manifest/schema1", "github.com/docker/distribution/manifest/schema2", "github.com/docker/distribution/reference", "github.com/docker/distribution/registry/auth/token", "github.com/docker/distribution/registry/client/auth/challenge", "github.com/docker/libtrust", - "github.com/docker/notary", - "github.com/docker/notary/client", - "github.com/docker/notary/trustpinning", - "github.com/docker/notary/tuf/data", "github.com/garyburd/redigo/redis", "github.com/ghodss/yaml", "github.com/go-sql-driver/mysql", @@ -761,6 +778,10 @@ "github.com/stretchr/testify/mock", "github.com/stretchr/testify/require", "github.com/stretchr/testify/suite", + "github.com/theupdateframework/notary", + "github.com/theupdateframework/notary/client", + "github.com/theupdateframework/notary/trustpinning", + "github.com/theupdateframework/notary/tuf/data", "golang.org/x/crypto/pbkdf2", "golang.org/x/oauth2", "golang.org/x/oauth2/clientcredentials", diff --git a/src/Gopkg.toml b/src/Gopkg.toml index 7804266a4..67a00c2c2 100644 --- a/src/Gopkg.toml +++ b/src/Gopkg.toml @@ -42,7 +42,7 @@ ignored = ["github.com/goharbor/harbor/tests*"] [[constraint]] name = "github.com/docker/distribution" - version = "=2.6.2" + version = "=2.7.1" [[constraint]] branch = "master" @@ -126,8 +126,12 @@ ignored = ["github.com/goharbor/harbor/tests*"] [[constraint]] name = "github.com/bmatcuk/doublestar" - version = "1.1.1" + version = "=1.1.1" [[constraint]] name = "github.com/pkg/errors" version = "=0.8.1" + +[[constraint]] + name = "github.com/docker/notary" + version = "=0.6.1" diff --git a/src/common/utils/notary/helper.go b/src/common/utils/notary/helper.go index 1c6b3f361..76bd2ac0f 100644 --- a/src/common/utils/notary/helper.go +++ b/src/common/utils/notary/helper.go @@ -23,16 +23,16 @@ import ( "strings" "github.com/docker/distribution/registry/auth/token" - "github.com/docker/notary" - "github.com/docker/notary/client" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf/data" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/common/utils/registry" "github.com/goharbor/harbor/src/core/config" tokenutil "github.com/goharbor/harbor/src/core/service/token" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/trustpinning" + "github.com/theupdateframework/notary/tuf/data" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" ) var ( @@ -91,7 +91,7 @@ func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target } tr := registry.NewTransport(registry.GetHTTPTransport(), authorizer) gun := data.GUN(fqRepo) - notaryRepo, err := client.NewFileCachedNotaryRepository(notaryCachePath, gun, notaryEndpoint, tr, mockRetriever, trustPin) + notaryRepo, err := client.NewFileCachedRepository(notaryCachePath, gun, notaryEndpoint, tr, mockRetriever, trustPin) if err != nil { return res, err } diff --git a/src/common/utils/registry/auth/path.go b/src/common/utils/registry/auth/path.go index 0e3d1a403..c125e000a 100644 --- a/src/common/utils/registry/auth/path.go +++ b/src/common/utils/registry/auth/path.go @@ -17,7 +17,6 @@ package auth import ( "regexp" - "github.com/docker/distribution/digest" 
"github.com/docker/distribution/reference" "github.com/goharbor/harbor/src/common/utils/log" ) @@ -26,8 +25,8 @@ var ( base = regexp.MustCompile("/v2") catalog = regexp.MustCompile("/v2/_catalog") tag = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/tags/list") - manifest = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/manifests/(" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + ")") - blob = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/" + digest.DigestRegexp.String()) + manifest = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/manifests/(" + reference.TagRegexp.String() + "|" + reference.DigestRegexp.String() + ")") + blob = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/" + reference.DigestRegexp.String()) blobUpload = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/uploads") blobUploadChunk = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/uploads/[a-zA-Z0-9-_.=]+") diff --git a/src/common/utils/registry/auth/path_test.go b/src/common/utils/registry/auth/path_test.go index 7542592e1..6f304b500 100644 --- a/src/common/utils/registry/auth/path_test.go +++ b/src/common/utils/registry/auth/path_test.go @@ -31,8 +31,8 @@ func TestParseRepository(t *testing.T) { {"/v2/tags/list", ""}, {"/v2/tags/list/tags/list", "tags/list"}, {"/v2/library/manifests/latest", "library"}, - {"/v2/library/manifests/sha256:1234567890", "library"}, - {"/v2/library/blobs/sha256:1234567890", "library"}, + {"/v2/library/manifests/sha256:eec76eedea59f7bf39a2713bfd995c82cfaa97724ee5b7f5aba253e07423d0ae", "library"}, + {"/v2/library/blobs/sha256:eec76eedea59f7bf39a2713bfd995c82cfaa97724ee5b7f5aba253e07423d0ae", "library"}, {"/v2/library/blobs/uploads", "library"}, {"/v2/library/blobs/uploads/1234567890", "library"}, } diff --git a/src/common/utils/registry/auth/tokenauthorizer.go b/src/common/utils/registry/auth/tokenauthorizer.go index 66959e2e5..cac7c1c4a 100644 --- a/src/common/utils/registry/auth/tokenauthorizer.go +++ b/src/common/utils/registry/auth/tokenauthorizer.go @@ -193,7 +193,7 @@ func parseScopes(req *http.Request) ([]*token.ResourceActions, error) { // base scope = nil } else { - // unknow + // unknown return scopes, fmt.Errorf("can not parse scope from the request: %s %s", req.Method, req.URL.Path) } @@ -205,7 +205,7 @@ func parseScopes(req *http.Request) ([]*token.ResourceActions, error) { for _, s := range scopes { strs = append(strs, scopeString(s)) } - log.Debugf("scopses parsed from request: %s", strings.Join(strs, " ")) + log.Debugf("scopes parsed from request: %s", strings.Join(strs, " ")) return scopes, nil } diff --git a/src/core/api/replication_execution.go b/src/core/api/replication_execution.go index 2bba83db0..440b7c38c 100644 --- a/src/core/api/replication_execution.go +++ b/src/core/api/replication_execution.go @@ -20,6 +20,7 @@ import ( "net/http" "strconv" + common_http "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/replication" "github.com/goharbor/harbor/src/replication/dao/models" "github.com/goharbor/harbor/src/replication/event" @@ -264,6 +265,12 @@ func (r *ReplicationOperationAPI) GetTaskLog() { logBytes, err := replication.OperationCtl.GetTaskLog(taskID) if err != nil { + if httpErr, ok := err.(*common_http.Error); ok { + if ok && httpErr.Code == http.StatusNotFound { + r.SendNotFoundError(fmt.Errorf("the log of task %d not found", taskID)) + return + } + } 
r.SendInternalServerError(fmt.Errorf("failed to get log of task %d: %v", taskID, err)) return } diff --git a/src/jobservice/job/impl/scan/clair_job.go b/src/jobservice/job/impl/scan/clair_job.go index 829408fc8..4fcb79456 100644 --- a/src/jobservice/job/impl/scan/clair_job.go +++ b/src/jobservice/job/impl/scan/clair_job.go @@ -160,7 +160,7 @@ func prepareLayers(payload []byte, registryURL, repo, tk string) ([]models.Clair // form the chain by using the digests of all parent layers in the image, such that if another image is built on top of this image the layer name can be re-used. shaChain := "" for _, d := range manifest.References() { - if d.MediaType == schema2.MediaTypeConfig { + if d.MediaType == schema2.MediaTypeImageConfig { continue } shaChain += string(d.Digest) + "-" diff --git a/src/replication/operation/controller.go b/src/replication/operation/controller.go index c56bf42f9..5b7756ad9 100644 --- a/src/replication/operation/controller.go +++ b/src/replication/operation/controller.go @@ -16,7 +16,6 @@ package operation import ( "fmt" - "strings" "time" "github.com/goharbor/harbor/src/common/job" @@ -115,10 +114,6 @@ func (c *controller) StopReplication(executionID int64) error { continue } if err = c.scheduler.Stop(task.JobID); err != nil { - if isNotRunningJobError(err) { - log.Warningf("got not running job error when trying stop the task %d(job ID: %s): %v, skip", task.ID, task.JobID, err) - continue - } return err } log.Debugf("the stop request for task %d(job ID: %s) sent", task.ID, task.JobID) @@ -139,16 +134,6 @@ func isTaskRunning(task *models.Task) bool { return true } -// when trying to stop a job which isn't running in jobservice, -// an error whose message contains "xxx is not a running job" -// will be returned -func isNotRunningJobError(err error) bool { - if err == nil { - return false - } - return strings.Contains(err.Error(), "is not a running job") -} - func (c *controller) ListExecutions(query ...*models.ExecutionQuery) (int64, []*models.Execution, error) { return c.executionMgr.List(query...) 
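The clair_job.go change is purely a constant rename: distribution v2.7 exposes the image-configuration media type as schema2.MediaTypeImageConfig instead of schema2.MediaTypeConfig (the value, application/vnd.docker.container.image.v1+json, is unchanged), so prepareLayers keeps skipping the config descriptor when it builds the layer-name chain for Clair. A minimal sketch of that skip with a hand-built manifest standing in for the one Harbor pulls from the registry; the digests are illustrative:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Hand-built schema2 manifest: one config blob plus two layers.
	m := schema2.Manifest{
		Config: distribution.Descriptor{
			MediaType: schema2.MediaTypeImageConfig, // schema2.MediaTypeConfig before v2.7
			Digest:    digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111"),
		},
		Layers: []distribution.Descriptor{
			{MediaType: schema2.MediaTypeLayer, Digest: digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")},
			{MediaType: schema2.MediaTypeLayer, Digest: digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333")},
		},
	}

	// References() returns the config descriptor followed by the layers;
	// as in prepareLayers, the config must not contribute to the layer chain.
	shaChain := ""
	for _, d := range m.References() {
		if d.MediaType == schema2.MediaTypeImageConfig {
			continue
		}
		shaChain += string(d.Digest) + "-"
	}
	fmt.Println(shaChain) // only the two layer digests, joined with "-"
}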
} diff --git a/src/replication/operation/controller_test.go b/src/replication/operation/controller_test.go index 482022e46..b2698ba9a 100644 --- a/src/replication/operation/controller_test.go +++ b/src/replication/operation/controller_test.go @@ -15,7 +15,6 @@ package operation import ( - "errors" "io" "testing" @@ -329,27 +328,3 @@ func TestIsTaskRunning(t *testing.T) { assert.Equal(t, c.isRunning, isTaskRunning(c.task)) } } - -func TestIsNotRunningJobError(t *testing.T) { - cases := []struct { - err error - isNotRunningJobError bool - }{ - { - err: nil, - isNotRunningJobError: false, - }, - { - err: errors.New("not the error"), - isNotRunningJobError: false, - }, - { - err: errors.New(`[ERROR] [handler.go:253]: Serve http request 'POST /api/v1/jobs/734a11140d939ef700889725' error: 500 {"code":10008,"message":"Stop job failed with error","details":"job '734a11140d939ef700889725' is not a running job"}`), - isNotRunningJobError: true, - }, - } - - for _, c := range cases { - assert.Equal(t, c.isNotRunningJobError, isNotRunningJobError(c.err)) - } -} diff --git a/src/vendor/github.com/Sirupsen/logrus/.travis.yml b/src/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index a23296a53..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - tip -env: - - GOMAXPROCS=4 GORACE=halt_on_error=1 -install: - - go get github.com/stretchr/testify/assert - - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows -script: - - go test -race -v ./... diff --git a/src/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/src/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index 1bd1deb29..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,123 +0,0 @@ -# 1.0.5 - -* Fix hooks race (#707) -* Fix panic deadlock (#695) - -# 1.0.4 - -* Fix race when adding hooks (#612) -* Fix terminal check in AppEngine (#635) - -# 1.0.3 - -* Replace example files with testable examples - -# 1.0.2 - -* bug: quote non-string values in text formatter (#583) -* Make (*Logger) SetLevel a public method - -# 1.0.1 - -* bug: fix escaping in text formatter (#575) - -# 1.0.0 - -* Officially changed name to lower-case -* bug: colors on Windows 10 (#541) -* bug: fix race in accessing level (#512) - -# 0.11.5 - -* feature: add writer and writerlevel to entry (#372) - -# 0.11.4 - -* bug: fix undefined variable on solaris (#493) - -# 0.11.3 - -* formatter: configure quoting of empty values (#484) -* formatter: configure quoting character (default is `"`) (#484) -* bug: fix not importing io correctly in non-linux environments (#481) - -# 0.11.2 - -* bug: fix windows terminal detection (#476) - -# 0.11.1 - -* bug: fix tty detection with custom out (#471) - -# 0.11.0 - -* performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* 
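From here through the text_formatter.go deletion below, the patch drops the old mixed-case github.com/Sirupsen/logrus v1.0.5 vendor tree; the Gopkg.lock hunk earlier replaces it with the canonical lowercase github.com/sirupsen/logrus v1.4.1 that the upgraded distribution and notary code imports. Go import paths are case-sensitive, so only the lowercase spelling should appear in consuming code; an illustrative import, not code from this patch:

package main

// Only the lowercase path is vendored after this change; mixing in the old
// "github.com/Sirupsen/logrus" spelling would pull in a second copy of the package.
import log "github.com/sirupsen/logrus"

func main() {
	log.WithField("component", "example").Info("hello from logrus v1.4.1")
}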
logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/src/vendor/github.com/Sirupsen/logrus/entry.go b/src/vendor/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 778f4c9f0..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,288 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "os" - "sync" - "time" -) - -var bufferPool *sync.Pool - -func init() { - bufferPool = &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - } -} - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - // This field will be set on entry firing and the value will be equal to the one in Logger struct field. - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string - - // When formatter is called in entry.log(), an Buffer may be set to entry - Buffer *bytes.Buffer -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - return "", err - } - str := string(serialized) - return str, nil -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. 
-func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { - var buffer *bytes.Buffer - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - entry.fireHooks() - - buffer = bufferPool.Get().(*bytes.Buffer) - buffer.Reset() - defer bufferPool.Put(buffer) - entry.Buffer = buffer - - entry.write() - - entry.Buffer = nil - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) - } -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) fireHooks() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, &entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - } -} - -func (entry *Entry) write() { - serialized, err := entry.Logger.Formatter.Format(entry) - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - } else { - _, err = entry.Logger.Out.Write(serialized) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) 
-} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) - return msg[:len(msg)-1] -} diff --git a/src/vendor/github.com/Sirupsen/logrus/formatter.go b/src/vendor/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index b183ff5b1..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,45 +0,0 @@ -package logrus - -import "time" - -const defaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. 
-func prefixFieldClashes(data Fields) { - if t, ok := data["time"]; ok { - data["fields.time"] = t - } - - if m, ok := data["msg"]; ok { - data["fields.msg"] = m - } - - if l, ok := data["level"]; ok { - data["fields.level"] = l - } -} diff --git a/src/vendor/github.com/Sirupsen/logrus/json_formatter.go b/src/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index fb01c1b10..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,79 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type fieldKey string - -// FieldMap allows customization of the key names for default fields. -type FieldMap map[fieldKey]string - -// Default key names for the default fields -const ( - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" -) - -func (f FieldMap) resolve(key fieldKey) string { - if k, ok := f[key]; ok { - return k - } - - return string(key) -} - -// JSONFormatter formats logs into parsable json -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string - - // DisableTimestamp allows disabling automatic timestamps in output - DisableTimestamp bool - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &JSONFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", - // }, - // } - FieldMap FieldMap -} - -// Format renders a single log entry -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - - if !f.DisableTimestamp { - data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) - } - data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message - data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/src/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/src/vendor/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 4880d13d2..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine,!gopherjs - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -type Termios unix.Termios diff --git a/src/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/src/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index f29a0097c..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !appengine,!gopherjs - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS - -type Termios unix.Termios diff --git a/src/vendor/github.com/Sirupsen/logrus/text_formatter.go b/src/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 61b21caea..000000000 --- a/src/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,178 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "sort" - "strings" - "sync" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 36 - gray = 37 -) - -var ( - baseTimestamp time.Time -) - -func init() { - baseTimestamp = time.Now() -} - -// TextFormatter formats logs into text -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool - - // QuoteEmptyFields will wrap empty fields in quotes if true - QuoteEmptyFields bool - - // Whether the logger's out is to a terminal - isTerminal bool - - sync.Once -} - -func (f *TextFormatter) init(entry *Entry) { - if entry.Logger != nil { - f.isTerminal = checkIfTerminal(entry.Logger.Out) - } -} - -// Format renders a single log entry -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var b *bytes.Buffer - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - prefixFieldClashes(entry.Data) - - f.Do(func() { f.init(entry) }) - - isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if f.DisableTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) - } else if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) - } else { - 
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) - f.appendValue(b, v) - } -} - -func (f *TextFormatter) needsQuoting(text string) bool { - if f.QuoteEmptyFields && len(text) == 0 { - return true - } - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - if b.Len() > 0 { - b.WriteByte(' ') - } - b.WriteString(key) - b.WriteByte('=') - f.appendValue(b, value) -} - -func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - stringVal, ok := value.(string) - if !ok { - stringVal = fmt.Sprint(value) - } - - if !f.needsQuoting(stringVal) { - b.WriteString(stringVal) - } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) - } -} diff --git a/src/vendor/github.com/docker/distribution/.gitignore b/src/vendor/github.com/docker/distribution/.gitignore index 1c3ae0a77..4cf7888e9 100644 --- a/src/vendor/github.com/docker/distribution/.gitignore +++ b/src/vendor/github.com/docker/distribution/.gitignore @@ -35,3 +35,4 @@ bin/* # Editor/IDE specific files. *.sublime-project *.sublime-workspace +.idea/* diff --git a/src/vendor/github.com/docker/distribution/.gometalinter.json b/src/vendor/github.com/docker/distribution/.gometalinter.json new file mode 100644 index 000000000..9df5b14bc --- /dev/null +++ b/src/vendor/github.com/docker/distribution/.gometalinter.json @@ -0,0 +1,16 @@ +{ + "Vendor": true, + "Deadline": "2m", + "Sort": ["linter", "severity", "path", "line"], + "EnableGC": true, + "Enable": [ + "structcheck", + "staticcheck", + "unconvert", + + "gofmt", + "goimports", + "golint", + "vet" + ] +} diff --git a/src/vendor/github.com/docker/distribution/.mailmap b/src/vendor/github.com/docker/distribution/.mailmap index 2d68669f3..0f48321d4 100644 --- a/src/vendor/github.com/docker/distribution/.mailmap +++ b/src/vendor/github.com/docker/distribution/.mailmap @@ -1,9 +1,9 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland Brian Bland Brian Bland -Josh Hawn Josh Hawn +Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern Andrew Meredith Andrew Meredith @@ -16,4 +16,17 @@ davidli davidli Omer Cohen Omer Cohen Eric Yang Eric Yang Nikita Tarasov Nikita -Misty Stanley-Jones Misty Stanley-Jones +Yu Wang yuwaMSFT2 +Yu Wang Yu Wang (UC) +Olivier Gambier dmp +Olivier Gambier Olivier +Olivier Gambier Olivier +Elsan Li 李楠 elsanli(李楠) +Rui Cao ruicao +Gwendolynne Barr gbarr01 +Haibing Zhou 周海兵 zhouhaibing089 +Feng Honglin tifayuki +Helen Xie Helen-xie +Mike Brown Mike Brown +Manish Tomar Manish Tomar +Sakeven Jiang sakeven diff --git a/src/vendor/github.com/docker/distribution/.travis.yml b/src/vendor/github.com/docker/distribution/.travis.yml new file mode 100644 index 000000000..44ced6045 --- /dev/null +++ b/src/vendor/github.com/docker/distribution/.travis.yml @@ -0,0 +1,51 @@ +dist: trusty +sudo: required +# setup travis so that we can run containers for integration tests +services: + - docker + +language: go + +go: + - 
"1.11.x" + +go_import_path: github.com/docker/distribution + +addons: + apt: + packages: + - python-minimal + + +env: + - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1 + +before_install: + - uname -r + - sudo apt-get -q update + +install: + - go get -u github.com/vbatts/git-validation + # TODO: Add enforcement of license + # - go get -u github.com/kunalkushwaha/ltag + - cd $TRAVIS_BUILD_DIR + +script: + - export GOOS=$TRAVIS_GOOS + - export CGO_ENABLED=$TRAVIS_CGO_ENABLED + - DCO_VERBOSITY=-q script/validate/dco + - GOOS=linux script/setup/install-dev-tools + - script/validate/vendor + - go build -i . + - make check + - make build + - make binaries + # Currently takes too long + #- if [ "$GOOS" = "linux" ]; then make test-race ; fi + - if [ "$GOOS" = "linux" ]; then make coverage ; fi + +after_success: + - bash <(curl -s https://codecov.io/bash) -F linux + +before_deploy: + # Run tests with storage driver configurations diff --git a/src/vendor/github.com/docker/distribution/AUTHORS b/src/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index aaf029871..000000000 --- a/src/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Antonio Mercado -Antonio Murdaca -Anton Tiurin -Anusha Ragunathan -a-palchikov -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -davidli -David Verhasselt -David Xia -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. 
Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hua Wang -Hu Keping -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jonathan Boulle -Jon Johnson -Jon Poler -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Ke Xu -liuchang0812 -Liu Hua -Li Yi -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matthew Green -Matt Moore -Matt Robenolt -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victoria Bialas -Victor Vieux -Vincent Batts -Vincent Demeester -Vincent Giersch -weiyuan.yl -W. Trevor King -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yixi zhang -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/src/vendor/github.com/docker/distribution/BUILDING.md b/src/vendor/github.com/docker/distribution/BUILDING.md index 2d5a10119..2981d016b 100644 --- a/src/vendor/github.com/docker/distribution/BUILDING.md +++ b/src/vendor/github.com/docker/distribution/BUILDING.md @@ -71,9 +71,7 @@ commands, such as `go test`, should work per package (please see A `Makefile` has been provided as a convenience to support repeatable builds. Please install the following into `GOPATH` for it to work: - go get github.com/tools/godep github.com/golang/lint/golint - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. + go get github.com/golang/lint/golint Once these commands are available in the `GOPATH`, run `make` to get a full build: @@ -85,7 +83,7 @@ build: + lint + build github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/Sirupsen/logrus + github.com/sirupsen/logrus github.com/docker/libtrust ... github.com/yvasiyarov/gorelic @@ -105,12 +103,12 @@ build: + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + binaries -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, +The above provides a repeatable build using the contents of the vendor +directory. This includes formatting, vetting, linting, building, testing and generating tagged binaries. 
We can verify this worked by running the registry binary generated in the "./bin" directory: - $ ./bin/registry -version + $ ./bin/registry --version ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m ### Optional build tags diff --git a/src/vendor/github.com/docker/distribution/CHANGELOG.md b/src/vendor/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index b1a5c6824..000000000 --- a/src/vendor/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,114 +0,0 @@ -# Changelog - -## 2.6.1 (2017-04-05) - -#### Registry -- Fix `Forwarded` header handling, revert use of `X-Forwarded-Port` -- Use driver `Stat` for registry health check - -## 2.6.0 (2017-01-18) - -#### Storage -- S3: fixed bug in delete due to read-after-write inconsistency -- S3: allow EC2 IAM roles to be used when authorizing region endpoints -- S3: add Object ACL Support -- S3: fix delete method's notion of subpaths -- S3: use multipart upload API in `Move` method for performance -- S3: add v2 signature signing for legacy S3 clones -- Swift: add simple heuristic to detect incomplete DLOs during read ops -- Swift: support different user and tenant domains -- Swift: bulk deletes in chunks -- Aliyun OSS: fix delete method's notion of subpaths -- Aliyun OSS: optimize data copy after upload finishes -- Azure: close leaking response body -- Fix storage drivers dropping non-EOF errors when listing repositories -- Compare path properly when listing repositories in catalog -- Add a foreign layer URL host whitelist -- Improve catalog enumerate runtime - -#### Registry -- Export `storage.CreateOptions` in top-level package -- Enable notifications to endpoints that use self-signed certificates -- Properly validate multi-URL foreign layers -- Add control over validation of URLs in pushed manifests -- Proxy mode: fix socket leak when pull is cancelled -- Tag service: properly handle error responses on HEAD request -- Support for custom authentication URL in proxying registry -- Add configuration option to disable access logging -- Add notification filtering by target media type -- Manifest: `References()` returns all children -- Honor `X-Forwarded-Port` and Forwarded headers -- Reference: Preserve tag and digest in With* functions -- Add policy configuration for enforcing repository classes - -#### Client -- Changes the client Tags `All()` method to follow links -- Allow registry clients to connect via HTTP2 -- Better handling of OAuth errors in client - -#### Spec -- Manifest: clarify relationship between urls and foreign layers -- Authorization: add support for repository classes - -#### Manifest -- Override media type returned from `Stat()` for existing manifests -- Add plugin mediatype to distribution manifest - -#### Docs -- Document `TOOMANYREQUESTS` error code -- Document required Let's Encrypt port -- Improve documentation around implementation of OAuth2 -- Improve documentation for configuration - -#### Auth -- Add support for registry type in scope -- Add support for using v2 ping challenges for v1 -- Add leeway to JWT `nbf` and `exp` checking -- htpasswd: dynamically parse htpasswd file -- Fix missing auth headers with PATCH HTTP request when pushing to default port - -#### Dockerfile -- Update to go1.7 -- Reorder Dockerfile steps for better layer caching - -#### Notes - -Documentation has moved to the documentation repository at -`github.com/docker/docker.github.io/tree/master/registry` - -The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` 
ing. - - -## 2.5.0 (2016-06-14) - -#### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -#### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -#### Client -- Add option to get content digest from manifest get - -#### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -#### API -- Support returning HTTP 429 (Too Many Requests) - -#### Documentation -- Update auth documentation examples to show "expires in" as int - -#### Docker Image -- Use Alpine Linux as base image - - diff --git a/src/vendor/github.com/docker/distribution/CONTRIBUTING.md b/src/vendor/github.com/docker/distribution/CONTRIBUTING.md index 7cc7aedff..4c067d9e7 100644 --- a/src/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ b/src/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing to the registry -## Before reporting an issue... +## Before reporting an issue... ### If your problem is with... @@ -21,13 +21,21 @@ Then please do not open an issue here yet - you should first try one of the foll - irc: #docker-distribution on freenode - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + ## Reporting an issue properly By following these simple rules you will get better and faster feedback on your issue. - search the bugtracker for an already reported issue -### If you found an issue that describes your problem: +### If you found an issue that describes your problem: - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - please refrain from adding "same thing here" or "+1" comments @@ -43,7 +51,7 @@ By following these simple rules you will get better and faster feedback on your 2. copy the output of: - `docker version` - `docker info` - - `docker exec registry -version` + - `docker exec registry --version` 3. copy the command line you used to launch your Registry 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) 5. 
reproduce your problem and get your docker daemon logs showing the error diff --git a/src/vendor/github.com/docker/distribution/Dockerfile b/src/vendor/github.com/docker/distribution/Dockerfile index 426954a11..9537817ca 100644 --- a/src/vendor/github.com/docker/distribution/Dockerfile +++ b/src/vendor/github.com/docker/distribution/Dockerfile @@ -1,17 +1,22 @@ -FROM golang:1.7-alpine +FROM golang:1.11-alpine AS build ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV DOCKER_BUILDTAGS include_oss include_gcs +ENV BUILDTAGS include_oss include_gcs + +ARG GOOS=linux +ARG GOARCH=amd64 +ARG GOARM=6 RUN set -ex \ - && apk add --no-cache make git + && apk add --no-cache make git file WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR +RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" + +FROM alpine COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN make PREFIX=/go clean binaries - +COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] diff --git a/src/vendor/github.com/docker/distribution/MAINTAINERS b/src/vendor/github.com/docker/distribution/MAINTAINERS index bda400150..3183620c5 100644 --- a/src/vendor/github.com/docker/distribution/MAINTAINERS +++ b/src/vendor/github.com/docker/distribution/MAINTAINERS @@ -6,18 +6,198 @@ # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # -# This file is compiled into the MAINTAINERS file in docker/opensource. -# + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" + + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. +""" + + [Rules.reviewer] + + title = "What is a reviewer?" + + text = """ +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM count towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. +""" + + [Rules.adding-maintainers] + + title = "How are maintainers added?" + + text = """ +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. + +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. 
+ +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed on the maintainers mailing list. + +After a candidate has been announced on the maintainers mailing list, the +existing maintainers are given five business days to discuss the candidate, +raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing +list. Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The candidate becomes a maintainer once the pull request is +merged. +""" + + [Rules.stepping-down-policy] + + title = "Stepping down policy" + + text = """ +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. +""" + + [Rules.inactive-maintainers] + + title = "Removal of inactive maintainers" + + text = """ +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. If the maintainer decides to step down as a maintainer, they +open a pull request to be removed from the MAINTAINERS file. + +If the maintainer wants to remain a maintainer, but is unable to perform the +required duties they can be removed with a vote of at least 66% of +the current maintainers. An e-mail is sent to the +mailing list, inviting maintainers of the project to vote. The voting period is +five business days. Issues related to a maintainer's performance should be +discussed with them among the other maintainers so that they are not surprised +by a pull request removing them. +""" + + [Rules.decisions] + + title = "How are decisions made?" + + text = """ +Short answer: EVERYTHING IS A PULL REQUEST. + +distribution is an open-source project with an open design philosophy. This means +that the repository is the source of truth for EVERY aspect of the project, +including its philosophy, design, road map, and APIs. *If it's part of the +project, it's in the repo. If it's in the repo, it's part of the project.* + +As a result, all decisions can be expressed as changes to the repository. An +implementation change is a change to the source code. An API change is a change +to the API specification. A philosophy change is a change to the philosophy +manifesto, and so on. + +All decisions affecting distribution, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. 
Who does this depends on the nature +of the pull request and which areas of the project it affects. +""" + + [Rules.DCO] + + title = "Helping contributors with the DCO" + + text = """ +The [DCO or `Sign your work`]( +https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some distribution contributors are not as familiar with `git`, or have used a web +based editor, and thus asking them to `git commit --amend -s` is not the best +way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. +The most trivial way for a contributor to allow the maintainer to do this, is to +add a DCO signature in a pull requests's comment, or a maintainer can simply +note that the change is sufficiently trivial that it does not substantially +change the existing contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. +""" + + [Rules."no direct push"] + + title = "I'm a maintainer. Should I make pull requests too?" + + text = """ +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. +""" + + [Rules.tsc] + + title = "Conflict Resolution and technical disputes" + + text = """ +distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." + """ + + [Rules.meta] + + title = "How is this process changed?" + + text = "Just like everything else: by making a pull request :)" + +# Current project organization [Org] - [Org."Core maintainers"] + + [Org.Maintainers] people = [ - "aaronlehmann", "dmcgowan", "dmp42", - "richardscothern", - "shykes", "stevvooe", ] + [Org.Reviewers] + people = [ + "manishtomar", + "caervs", + "davidswu", + "RobbKistler" + ] [people] @@ -27,10 +207,15 @@ # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" + [people.caervs] + Name = "Ryan Abrams" + Email = "rdabrams@gmail.com" + GitHub = "caervs" + + [people.davidswu] + Name = "David Wu" + Email = "dwu7401@gmail.com" + GitHub = "davidswu" [people.dmcgowan] Name = "Derek McGowan" @@ -42,15 +227,15 @@ Email = "olivier@docker.com" GitHub = "dmp42" - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" + [people.manishtomar] + Name = "Manish Tomar" + Email = "manish.tomar@docker.com" + GitHub = "manishtomar" - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" + [people.RobbKistler] + Name = "Robb Kistler" + Email = "robb.kistler@docker.com" + GitHub = "RobbKistler" [people.stevvooe] Name = "Stephen Day" diff --git a/src/vendor/github.com/docker/distribution/Makefile b/src/vendor/github.com/docker/distribution/Makefile index 47b8f1d0b..4635c6eca 100644 --- a/src/vendor/github.com/docker/distribution/Makefile +++ b/src/vendor/github.com/docker/distribution/Makefile @@ -1,9 +1,21 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Used to populate version variable in main package. VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +REVISION=$(shell git rev-parse HEAD)$(shell if ! 
git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) + + +PKG=github.com/docker/distribution + +# Project packages. +PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) +INTEGRATION_PACKAGE=${PKG} +COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) + + +# Project binaries. +COMMANDS=registry digest registry-api-descriptor-template # Allow turning off function inlining and variable registerization ifeq (${DISABLE_OPTIMIZATION},true) @@ -11,99 +23,80 @@ ifeq (${DISABLE_OPTIMIZATION},true) VERSION:="$(VERSION)-noopt" endif -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" +WHALE = "+" -.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet +# Go files +# +TESTFLAGS_RACE= +GOFILES=$(shell find . -type f -name '*.go') +GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) +GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= -v $(TESTFLAGS_RACE) +TESTFLAGS_PARALLEL ?= 8 + +.PHONY: all build binaries check clean test test-race test-full integration coverage .DEFAULT: all -all: fmt vet lint build test binaries -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ +all: binaries # This only needs to be generated by hand when cutting full releases. version/version.go: + @echo "$(WHALE) $@" ./version/version.sh > $@ -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 +check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") + @echo "$(WHALE) $@" + gometalinter --config .gometalinter.json ./... -# Go files -GOFILES=$(shell find . -type f -name '*.go') +test: ## run tests, except integration test with test.short + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) -# Package list -PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... 
| grep -v ^github.com/docker/distribution/vendor/) +test-race: ## run tests, except integration test with test.short and race + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) -# Resolving binary dependencies for specific targets -GOLINT=$(shell which golint || echo '') -GODEP=$(shell which godep || echo '') +test-full: ## run tests, except integration tests + @echo "$(WHALE) $@" + @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) -${PREFIX}/bin/registry: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry +integration: ## run integration tests + @echo "$(WHALE) $@" + @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} -${PREFIX}/bin/digest: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest +coverage: ## generate coverprofiles from the unit tests + @echo "$(WHALE) $@" + @rm -f coverage.txt + @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null + @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ + go test ${GO_TAGS} ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) -${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template +FORCE: -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ +# Build a binary from a cmd. +bin/%: cmd/% FORCE + @echo "$(WHALE) $@${BINARY_SUFFIX}" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) + @echo "$(WHALE) $@" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-save: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) save $(PKGS) - -dep-restore: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) restore -v - -dep-validate: dep-restore - @echo "+ $@" - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @rm -Rf Godeps - @$(GODEP) save ./... 
- @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) - @rm -Rf .vendor.bak +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) diff --git a/src/vendor/github.com/docker/distribution/README.md b/src/vendor/github.com/docker/distribution/README.md index a6e8db0fb..998878850 100644 --- a/src/vendor/github.com/docker/distribution/README.md +++ b/src/vendor/github.com/docker/distribution/README.md @@ -76,8 +76,7 @@ may be the better choice. For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator] -(https://github.com/docker/migrator). +created. For more information see [docker/migrator](https://github.com/docker/migrator). ## Contribute diff --git a/src/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/src/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md deleted file mode 100644 index 49235cecd..000000000 --- a/src/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,36 +0,0 @@ -## Registry Release Checklist - -10. Compile release notes detailing features and since the last release. Update the `CHANGELOG.md` file. - -20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` - -30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. - - ``` -make AUTHORS -``` - -40. Create a signed tag. - - Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]` -You will need PGP installed and a PGP key which has been added to your Github account. The comment for the tag should include the release notes. - -50. Push the signed tag - -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. - -70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. - -80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. -e.g. to release `2.3.1` - - `2.3.1 (new)` - - `2.3.0 -> 2.3.0` can be removed - - `2 -> 2.3.1` - - `2.3 -> 2.3.1` - -90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. 
- diff --git a/src/vendor/github.com/docker/distribution/blobs.go b/src/vendor/github.com/docker/distribution/blobs.go index 1f91ae21e..c0e9261be 100644 --- a/src/vendor/github.com/docker/distribution/blobs.go +++ b/src/vendor/github.com/docker/distribution/blobs.go @@ -1,15 +1,16 @@ package distribution import ( + "context" "errors" "fmt" "io" "net/http" "time" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" ) var ( @@ -66,12 +67,19 @@ type Descriptor struct { Size int64 `json:"size,omitempty"` // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. + // against this digest. Digest digest.Digest `json:"digest,omitempty"` // URLs contains the source URLs of this content. URLs []string `json:"urls,omitempty"` + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *v1.Platform `json:"platform,omitempty"` + // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. @@ -152,7 +160,7 @@ type BlobProvider interface { // BlobServer can serve blobs via http. type BlobServer interface { - // ServeBlob attempts to serve the blob, identifed by dgst, via http. The + // ServeBlob attempts to serve the blob, identified by dgst, via http. The // service may decide to redirect the client elsewhere or serve the data // directly. // diff --git a/src/vendor/github.com/docker/distribution/circle.yml b/src/vendor/github.com/docker/distribution/circle.yml deleted file mode 100644 index 61f8be0cb..000000000 --- a/src/vendor/github.com/docker/distribution/circle.yml +++ /dev/null @@ -1,93 +0,0 @@ -# Pony-up! -machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.7 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/tools/godep: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # todo(richard): replace with a more robust vendoring solution. 
Removed due to a fundamental disagreement in godep philosophies. - # Ensure validation of dependencies - # - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: - # pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - - # Test stable with race - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/src/vendor/github.com/docker/distribution/context/context.go b/src/vendor/github.com/docker/distribution/context/context.go index 23cbf5b54..ab6865467 100644 --- a/src/vendor/github.com/docker/distribution/context/context.go +++ b/src/vendor/github.com/docker/distribution/context/context.go @@ -1,21 +1,16 @@ package context import ( + "context" "sync" "github.com/docker/distribution/uuid" - "golang.org/x/net/context" ) -// Context is a copy of Context from the golang.org/x/net/context package. -type Context interface { - context.Context -} - // instanceContext is a context that provides only an instance id. It is // provided as the main background context. type instanceContext struct { - Context + context.Context id string // id of context, logged as "instance.id" once sync.Once // once protect generation of the id } @@ -42,17 +37,10 @@ var background = &instanceContext{ // Background returns a non-nil, empty Context. The background context // provides a single key, "instance.id" that is globally unique to the // process. -func Background() Context { +func Background() context.Context { return background } -// WithValue returns a copy of parent in which the value associated with key is -// val. Use context Values only for request-scoped data that transits processes -// and APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} - // stringMapContext is a simple context implementation that checks a map for a // key, falling back to a parent if not present. 
type stringMapContext struct { diff --git a/src/vendor/github.com/docker/distribution/context/doc.go b/src/vendor/github.com/docker/distribution/context/doc.go index 3b4ab8882..0c631a9c9 100644 --- a/src/vendor/github.com/docker/distribution/context/doc.go +++ b/src/vendor/github.com/docker/distribution/context/doc.go @@ -1,7 +1,6 @@ // Package context provides several utilities for working with -// golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevant request information but this package is not limited to -// that purpose. +// Go's context in http requests. Primarily, the focus is on logging relevant +// request information but this package is not limited to that purpose. // // The easiest way to get started is to get the background context: // @@ -64,7 +63,7 @@ // Note that this only affects the new context, the previous context, with the // version field, can be used independently. Put another way, the new logger, // added to the request context, is unique to that context and can have -// request scoped varaibles. +// request scoped variables. // // HTTP Requests // diff --git a/src/vendor/github.com/docker/distribution/context/http.go b/src/vendor/github.com/docker/distribution/context/http.go index 7fe9b8ab0..bc22f0bba 100644 --- a/src/vendor/github.com/docker/distribution/context/http.go +++ b/src/vendor/github.com/docker/distribution/context/http.go @@ -1,6 +1,7 @@ package context import ( + "context" "errors" "net" "net/http" @@ -8,9 +9,9 @@ import ( "sync" "time" - log "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" ) // Common errors used with this package. @@ -68,7 +69,7 @@ func RemoteIP(r *http.Request) string { // is available at "http.request". Other common attributes are available under // the prefix "http.request.". If a request is already present on the context, // this method will panic. -func WithRequest(ctx Context, r *http.Request) Context { +func WithRequest(ctx context.Context, r *http.Request) context.Context { if ctx.Value("http.request") != nil { // NOTE(stevvooe): This needs to be considered a programming error. It // is unlikely that we'd want to have more than one request in @@ -87,7 +88,7 @@ func WithRequest(ctx Context, r *http.Request) Context { // GetRequest returns the http request in the given context. Returns // ErrNoRequestContext if the context does not have an http request associated // with it. -func GetRequest(ctx Context) (*http.Request, error) { +func GetRequest(ctx context.Context) (*http.Request, error) { if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { return r, nil } @@ -96,25 +97,13 @@ func GetRequest(ctx Context) (*http.Request, error) { // GetRequestID attempts to resolve the current request id, if possible. An // error is return if it is not available on the context. -func GetRequestID(ctx Context) string { +func GetRequestID(ctx context.Context) string { return GetStringValue(ctx, "http.request.id") } // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. 
-func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { - if closeNotifier, ok := w.(http.CloseNotifier); ok { - irwCN := &instrumentedResponseWriterCN{ - instrumentedResponseWriter: instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - }, - CloseNotifier: closeNotifier, - } - - return irwCN, irwCN - } - +func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) { irw := instrumentedResponseWriter{ ResponseWriter: w, Context: ctx, @@ -125,7 +114,7 @@ func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.Respo // GetResponseWriter returns the http.ResponseWriter from the provided // context. If not present, ErrNoResponseWriterContext is returned. The // returned instance provides instrumentation in the context. -func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { +func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) { v := ctx.Value("http.response") rw, ok := v.(http.ResponseWriter) @@ -145,7 +134,7 @@ var getVarsFromRequest = mux.Vars // example, if looking for the variable "name", it can be accessed as // "vars.name". Implementations that are accessing values need not know that // the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx Context, r *http.Request) Context { +func WithVars(ctx context.Context, r *http.Request) context.Context { return &muxVarsContext{ Context: ctx, vars: getVarsFromRequest(r), @@ -155,7 +144,7 @@ func WithVars(ctx Context, r *http.Request) Context { // GetRequestLogger returns a logger that contains fields from the request in // the current context. If the request is not available in the context, no // fields will display. Request loggers can safely be pushed onto the context. -func GetRequestLogger(ctx Context) Logger { +func GetRequestLogger(ctx context.Context) Logger { return GetLogger(ctx, "http.request.id", "http.request.method", @@ -171,7 +160,7 @@ func GetRequestLogger(ctx Context) Logger { // Because the values are read at call time, pushing a logger returned from // this function on the context will lead to missing or invalid data. Only // call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx Context) Logger { +func GetResponseLogger(ctx context.Context) Logger { l := getLogrusLogger(ctx, "http.response.written", "http.response.status", @@ -188,7 +177,7 @@ func GetResponseLogger(ctx Context) Logger { // httpRequestContext makes information about a request available to context. type httpRequestContext struct { - Context + context.Context startedAt time.Time id string @@ -247,7 +236,7 @@ fallback: } type muxVarsContext struct { - Context + context.Context vars map[string]string } @@ -269,20 +258,12 @@ func (ctx *muxVarsContext) Value(key interface{}) interface{} { return ctx.Context.Value(key) } -// instrumentedResponseWriterCN provides response writer information in a -// context. It implements http.CloseNotifier so that users can detect -// early disconnects. -type instrumentedResponseWriterCN struct { - instrumentedResponseWriter - http.CloseNotifier -} - // instrumentedResponseWriter provides response writer information in a // context. This variant is only used in the case where CloseNotifier is not // implemented by the parent ResponseWriter. 
type instrumentedResponseWriter struct { http.ResponseWriter - Context + context.Context mu sync.Mutex status int @@ -354,13 +335,3 @@ func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { fallback: return irw.Context.Value(key) } - -func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - } - - return irw.instrumentedResponseWriter.Value(key) -} diff --git a/src/vendor/github.com/docker/distribution/context/logger.go b/src/vendor/github.com/docker/distribution/context/logger.go index fbb6a0511..3e5b81bbf 100644 --- a/src/vendor/github.com/docker/distribution/context/logger.go +++ b/src/vendor/github.com/docker/distribution/context/logger.go @@ -1,10 +1,11 @@ package context import ( + "context" "fmt" - - "github.com/Sirupsen/logrus" "runtime" + + "github.com/sirupsen/logrus" ) // Logger provides a leveled-logging interface. @@ -38,24 +39,28 @@ type Logger interface { Warn(args ...interface{}) Warnf(format string, args ...interface{}) Warnln(args ...interface{}) + + WithError(err error) *logrus.Entry } +type loggerKey struct{} + // WithLogger creates a new context with provided logger. -func WithLogger(ctx Context, logger Logger) Context { - return WithValue(ctx, "logger", logger) +func WithLogger(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) } // GetLoggerWithField returns a logger instance with the specified field key // and value without affecting the context. Extra specified keys will be // resolved from the context. -func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { +func GetLoggerWithField(ctx context.Context, key, value interface{}, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) } // GetLoggerWithFields returns a logger instance with the specified fields // without affecting the context. Extra specified keys will be resolved from // the context. -func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { +func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { // must convert from interface{} -> interface{} to string -> interface{} for logrus. lfields := make(logrus.Fields, len(fields)) for key, value := range fields { @@ -71,7 +76,7 @@ func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys . // argument passed to GetLogger will be passed to fmt.Sprint when expanded as // a logging key field. If context keys are integer constants, for example, // its recommended that a String method is implemented. -func GetLogger(ctx Context, keys ...interface{}) Logger { +func GetLogger(ctx context.Context, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...) } @@ -79,11 +84,11 @@ func GetLogger(ctx Context, keys ...interface{}) Logger { // are provided, they will be resolved on the context and included in the // logger. Only use this function if specific logrus functionality is // required. -func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { +func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry { var logger *logrus.Entry // Get a logger, if it is present. 
- loggerInterface := ctx.Value("logger") + loggerInterface := ctx.Value(loggerKey{}) if loggerInterface != nil { if lgr, ok := loggerInterface.(*logrus.Entry); ok { logger = lgr diff --git a/src/vendor/github.com/docker/distribution/context/trace.go b/src/vendor/github.com/docker/distribution/context/trace.go index 721964a84..5b88ddaf4 100644 --- a/src/vendor/github.com/docker/distribution/context/trace.go +++ b/src/vendor/github.com/docker/distribution/context/trace.go @@ -1,6 +1,7 @@ package context import ( + "context" "runtime" "time" @@ -36,7 +37,7 @@ import ( // // Notice that the function name is automatically resolved, along with the // package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { +func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) { if ctx == nil { ctx = Background() } @@ -69,7 +70,7 @@ func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { // also provides fast lookup for the various attributes that are available on // the trace. type traced struct { - Context + context.Context id string parent string start time.Time diff --git a/src/vendor/github.com/docker/distribution/context/util.go b/src/vendor/github.com/docker/distribution/context/util.go index cb9ef52e3..c462e7563 100644 --- a/src/vendor/github.com/docker/distribution/context/util.go +++ b/src/vendor/github.com/docker/distribution/context/util.go @@ -1,13 +1,14 @@ package context import ( + "context" "time" ) // Since looks up key, which should be a time.Time, and returns the duration // since that time. If the key is not found, the value returned will be zero. // This is helpful when inferring metrics related to context execution times. -func Since(ctx Context, key interface{}) time.Duration { +func Since(ctx context.Context, key interface{}) time.Duration { if startedAt, ok := ctx.Value(key).(time.Time); ok { return time.Since(startedAt) } @@ -16,7 +17,7 @@ func Since(ctx Context, key interface{}) time.Duration { // GetStringValue returns a string value from the context. The empty string // will be returned if not found. -func GetStringValue(ctx Context, key interface{}) (value string) { +func GetStringValue(ctx context.Context, key interface{}) (value string) { if valuev, ok := ctx.Value(key).(string); ok { value = valuev } diff --git a/src/vendor/github.com/docker/distribution/context/version.go b/src/vendor/github.com/docker/distribution/context/version.go index 746cda02e..97cf9d665 100644 --- a/src/vendor/github.com/docker/distribution/context/version.go +++ b/src/vendor/github.com/docker/distribution/context/version.go @@ -1,16 +1,22 @@ package context +import "context" + +type versionKey struct{} + +func (versionKey) String() string { return "version" } + // WithVersion stores the application version in the context. The new context // gets a logger to ensure log messages are marked with the application // version. -func WithVersion(ctx Context, version string) Context { - ctx = WithValue(ctx, "version", version) +func WithVersion(ctx context.Context, version string) context.Context { + ctx = context.WithValue(ctx, versionKey{}, version) // push a new logger onto the stack - return WithLogger(ctx, GetLogger(ctx, "version")) + return WithLogger(ctx, GetLogger(ctx, versionKey{})) } // GetVersion returns the application version from the context. An empty // string may returned if the version was not set on the context. 
-func GetVersion(ctx Context) string { - return GetStringValue(ctx, "version") +func GetVersion(ctx context.Context) string { + return GetStringValue(ctx, versionKey{}) } diff --git a/src/vendor/github.com/docker/distribution/coverpkg.sh b/src/vendor/github.com/docker/distribution/coverpkg.sh deleted file mode 100755 index 25d419ae8..000000000 --- a/src/vendor/github.com/docker/distribution/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/src/vendor/github.com/docker/distribution/digest/digest.go b/src/vendor/github.com/docker/distribution/digest/digest.go deleted file mode 100644 index 31d821bba..000000000 --- a/src/vendor/github.com/docker/distribution/digest/digest.go +++ /dev/null @@ -1,139 +0,0 @@ -package digest - -import ( - "fmt" - "hash" - "io" - "regexp" - "strings" -) - -const ( - // DigestSha256EmptyTar is the canonical sha256 digest of empty data - DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// This allows to abstract the digest behind this type and work only in those -// terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg Algorithm, h hash.Hash) Digest { - return NewDigestFromBytes(alg, h.Sum(nil)) -} - -// NewDigestFromBytes returns a new digest from the byte contents of p. -// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) -// functions. This is also useful for rebuilding digests from binary -// serializations. -func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, p)) -} - -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. -func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestInvalidLength returned when digest has invalid length. - ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// ParseDigest parses s and returns the validated digest object. An error will -// be returned if the format is invalid. 
-func ParseDigest(s string) (Digest, error) { - d := Digest(s) - - return d, d.Validate() -} - -// FromReader returns the most valid digest for the underlying content using -// the canonical digest algorithm. -func FromReader(rd io.Reader) (Digest, error) { - return Canonical.FromReader(rd) -} - -// FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) Digest { - return Canonical.FromBytes(p) -} - -// Validate checks that the contents of d is a valid digest, returning an -// error if not. -func (d Digest) Validate() error { - s := string(d) - - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - - i := strings.Index(s, ":") - if i < 0 { - return ErrDigestInvalidFormat - } - - // case: "sha256:" with no hex. - if i+1 == len(s) { - return ErrDigestInvalidFormat - } - - switch algorithm := Algorithm(s[:i]); algorithm { - case SHA256, SHA384, SHA512: - if algorithm.Size()*2 != len(s[i+1:]) { - return ErrDigestInvalidLength - } - break - default: - return ErrDigestUnsupported - } - - return nil -} - -// Algorithm returns the algorithm portion of the digest. This will panic if -// the underlying digest is not in a valid format. -func (d Digest) Algorithm() Algorithm { - return Algorithm(d[:d.sepIndex()]) -} - -// Hex returns the hex digest portion of the digest. This will panic if the -// underlying digest is not in a valid format. -func (d Digest) Hex() string { - return string(d[d.sepIndex()+1:]) -} - -func (d Digest) String() string { - return string(d) -} - -func (d Digest) sepIndex() int { - i := strings.Index(string(d), ":") - - if i < 0 { - panic("could not find ':' in digest: " + d) - } - - return i -} diff --git a/src/vendor/github.com/docker/distribution/digest/digester.go b/src/vendor/github.com/docker/distribution/digest/digester.go deleted file mode 100644 index f3105a45b..000000000 --- a/src/vendor/github.com/docker/distribution/digest/digester.go +++ /dev/null @@ -1,155 +0,0 @@ -package digest - -import ( - "crypto" - "fmt" - "hash" - "io" -) - -// Algorithm identifies and implementation of a digester by an identifier. -// Note the that this defines both the hash algorithm used and the string -// encoding. -type Algorithm string - -// supported digest types -const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding - - // Canonical is the primary digest algorithm used with the distribution - // project. Other digests may be used but this one is the primary storage - // digest. - Canonical = SHA256 -) - -var ( - // TODO(stevvooe): Follow the pattern of the standard crypto package for - // registration of digests. Effectively, we are a registerable set and - // common symbol access. - - // algorithms maps values to hash.Hash implementations. Other algorithms - // may be available but they cannot be calculated by the digest package. - algorithms = map[Algorithm]crypto.Hash{ - SHA256: crypto.SHA256, - SHA384: crypto.SHA384, - SHA512: crypto.SHA512, - } -) - -// Available returns true if the digest type is available for use. If this -// returns false, New and Hash will return nil. -func (a Algorithm) Available() bool { - h, ok := algorithms[a] - if !ok { - return false - } - - // check availability of the hash, as well - return h.Available() -} - -func (a Algorithm) String() string { - return string(a) -} - -// Size returns number of bytes returned by the hash. 
-func (a Algorithm) Size() int { - h, ok := algorithms[a] - if !ok { - return 0 - } - return h.Size() -} - -// Set implemented to allow use of Algorithm as a command line flag. -func (a *Algorithm) Set(value string) error { - if value == "" { - *a = Canonical - } else { - // just do a type conversion, support is queried with Available. - *a = Algorithm(value) - } - - return nil -} - -// New returns a new digester for the specified algorithm. If the algorithm -// does not have a digester implementation, nil will be returned. This can be -// checked by calling Available before calling New. -func (a Algorithm) New() Digester { - return &digester{ - alg: a, - hash: a.Hash(), - } -} - -// Hash returns a new hash as used by the algorithm. If not available, the -// method will panic. Check Algorithm.Available() before calling. -func (a Algorithm) Hash() hash.Hash { - if !a.Available() { - // NOTE(stevvooe): A missing hash is usually a programming error that - // must be resolved at compile time. We don't import in the digest - // package to allow users to choose their hash implementation (such as - // when using stevvooe/resumable or a hardware accelerated package). - // - // Applications that may want to resolve the hash at runtime should - // call Algorithm.Available before call Algorithm.Hash(). - panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) - } - - return algorithms[a].New() -} - -// FromReader returns the digest of the reader using the algorithm. -func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.New() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromBytes digests the input and returns a Digest. -func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.New() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. - panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() -} - -// TODO(stevvooe): Allow resolution of verifiers using the digest type and -// this registration system. - -// Digester calculates the digest of written data. Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. -type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash -} - -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) -} diff --git a/src/vendor/github.com/docker/distribution/digest/doc.go b/src/vendor/github.com/docker/distribution/digest/doc.go deleted file mode 100644 index f64b0db32..000000000 --- a/src/vendor/github.com/docker/distribution/digest/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. 
-// More importantly, it provides tools and wrappers to work with -// hash.Hash-based digests with little effort. -// -// Basics -// -// The format of a digest is simply a string with two parts, dubbed the -// "algorithm" and the "digest", separated by a colon: -// -// : -// -// An example of a sha256 digest representation follows: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". -// -// Because the Digest type is simply a string, once a valid Digest is -// obtained, comparisons are cheap, quick and simple to express with the -// standard equality operator. -// -// Verification -// -// The main benefit of using the Digest type is simple verification against a -// given digest. The Verifier interface, modeled after the stdlib hash.Hash -// interface, provides a common write sink for digest verification. After -// writing is complete, calling the Verifier.Verified method will indicate -// whether or not the stream of bytes matches the target digest. -// -// Missing Features -// -// In addition to the above, we intend to add the following features to this -// package: -// -// 1. A Digester type that supports write sink digest calculation. -// -// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. -// -package digest diff --git a/src/vendor/github.com/docker/distribution/digest/verifiers.go b/src/vendor/github.com/docker/distribution/digest/verifiers.go deleted file mode 100644 index 9af3be134..000000000 --- a/src/vendor/github.com/docker/distribution/digest/verifiers.go +++ /dev/null @@ -1,44 +0,0 @@ -package digest - -import ( - "hash" - "io" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. Users instantiate a Verifier -// from one of the various methods, write the data under test to it then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest. - Verified() bool -} - -// NewDigestVerifier returns a verifier that compares the written bytes -// against a passed in digest. -func NewDigestVerifier(d Digest) (Verifier, error) { - if err := d.Validate(); err != nil { - return nil, err - } - - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - }, nil -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} diff --git a/src/vendor/github.com/docker/distribution/digest/set.go b/src/vendor/github.com/docker/distribution/digestset/set.go similarity index 90% rename from src/vendor/github.com/docker/distribution/digest/set.go rename to src/vendor/github.com/docker/distribution/digestset/set.go index 4b9313c1a..71327dca7 100644 --- a/src/vendor/github.com/docker/distribution/digest/set.go +++ b/src/vendor/github.com/docker/distribution/digestset/set.go @@ -1,10 +1,12 @@ -package digest +package digestset import ( "errors" "sort" "strings" "sync" + + digest "github.com/opencontainers/go-digest" ) var ( @@ -44,7 +46,7 @@ func NewSet() *Set { // values or short values. This function does not test equality, // rather whether the second value could match against the first // value. 
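The deleted digest package is replaced by github.com/opencontainers/go-digest; a small sketch of the equivalent calls, assuming the upgraded vendor tree (ParseDigest becomes digest.Parse; the digest string below is the empty-tar constant removed above):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// FromBytes hashes with the canonical algorithm (sha256).
	dgst := digest.FromBytes([]byte("hello world"))
	fmt.Println(dgst.Algorithm(), dgst.String())

	// Parse replaces the removed ParseDigest and validates the format.
	d, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Validate() == nil) // true
}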
-func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { if len(hex) == len(shortHex) { if hex != shortHex { return false @@ -64,7 +66,7 @@ func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { // If no digests could be found ErrDigestNotFound will be returned // with an empty digest value. If multiple matches are found // ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (Digest, error) { +func (dst *Set) Lookup(d string) (digest.Digest, error) { dst.mutex.RLock() defer dst.mutex.RUnlock() if len(dst.entries) == 0 { @@ -72,11 +74,11 @@ func (dst *Set) Lookup(d string) (Digest, error) { } var ( searchFunc func(int) bool - alg Algorithm + alg digest.Algorithm hex string ) - dgst, err := ParseDigest(d) - if err == ErrDigestInvalidFormat { + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { hex = d searchFunc = func(i int) bool { return dst.entries[i].val >= d @@ -108,7 +110,7 @@ func (dst *Set) Lookup(d string) (Digest, error) { // Add adds the given digest to the set. An error will be returned // if the given digest is invalid. If the digest already exists in the // set, this operation will be a no-op. -func (dst *Set) Add(d Digest) error { +func (dst *Set) Add(d digest.Digest) error { if err := d.Validate(); err != nil { return err } @@ -139,7 +141,7 @@ func (dst *Set) Add(d Digest) error { // Remove removes the given digest from the set. An err will be // returned if the given digest is invalid. If the digest does // not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d Digest) error { +func (dst *Set) Remove(d digest.Digest) error { if err := d.Validate(); err != nil { return err } @@ -167,10 +169,10 @@ func (dst *Set) Remove(d Digest) error { } // All returns all the digests in the set -func (dst *Set) All() []Digest { +func (dst *Set) All() []digest.Digest { dst.mutex.RLock() defer dst.mutex.RUnlock() - retValues := make([]Digest, len(dst.entries)) + retValues := make([]digest.Digest, len(dst.entries)) for i := range dst.entries { retValues[i] = dst.entries[i].digest } @@ -183,10 +185,10 @@ func (dst *Set) All() []Digest { // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. 
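With the rename to digestset, the set now stores go-digest values; a sketch of adding a digest and resolving a short hex prefix (the sample payload and prefix length are illustrative):

package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()
	dgst := digest.FromBytes([]byte("layer data"))
	if err := set.Add(dgst); err != nil {
		panic(err)
	}
	// Lookup resolves full digests as well as unambiguous hex prefixes.
	found, err := set.Lookup(dgst.Hex()[:12])
	fmt.Println(found == dgst, err) // true <nil>
}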
-func ShortCodeTable(dst *Set, length int) map[Digest]string { +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { dst.mutex.RLock() defer dst.mutex.RUnlock() - m := make(map[Digest]string, len(dst.entries)) + m := make(map[digest.Digest]string, len(dst.entries)) l := length resetIdx := 0 for i := 0; i < len(dst.entries); i++ { @@ -222,9 +224,9 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string { } type digestEntry struct { - alg Algorithm + alg digest.Algorithm val string - digest Digest + digest digest.Digest } type digestEntries []*digestEntry diff --git a/src/vendor/github.com/docker/distribution/errors.go b/src/vendor/github.com/docker/distribution/errors.go index c20f28113..8e0b788d6 100644 --- a/src/vendor/github.com/docker/distribution/errors.go +++ b/src/vendor/github.com/docker/distribution/errors.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) // ErrAccessDenied is returned when an access to a requested resource is @@ -20,6 +20,10 @@ var ErrManifestNotModified = errors.New("manifest not modified") // performed var ErrUnsupported = errors.New("operation unsupported") +// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 +// manifest but the registry is configured to reject it +var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") + // ErrTagUnknown is returned if the given tag is not known by the tag service type ErrTagUnknown struct { Tag string @@ -77,7 +81,7 @@ func (err ErrManifestUnknownRevision) Error() string { type ErrManifestUnverified struct{} func (ErrManifestUnverified) Error() string { - return fmt.Sprintf("unverified manifest") + return "unverified manifest" } // ErrManifestVerification provides a type to collect errors encountered diff --git a/src/vendor/github.com/docker/distribution/health/doc.go b/src/vendor/github.com/docker/distribution/health/doc.go index 8c106b42b..877f4daca 100644 --- a/src/vendor/github.com/docker/distribution/health/doc.go +++ b/src/vendor/github.com/docker/distribution/health/doc.go @@ -24,7 +24,7 @@ // "manual" checks that allow the service to quickly be brought in/out of // rotation. // -// import _ "github.com/docker/distribution/registry/health/api" +// import _ "github.com/docker/distribution/health/api" // // # curl localhost:5001/debug/health // {} @@ -122,6 +122,12 @@ // # curl localhost:5001/debug/health // {"fileChecker":"file exists"} // +// FileChecker only accepts absolute or relative file path. It does not work +// properly with tilde(~). You should make sure that the application has +// proper permission(read and execute permission for directory along with +// the specified file path). Otherwise, the FileChecker will report error +// and file health check is not ok. +// // You could also test the connectivity to a downstream service by using a // "HTTPChecker", but ensure that you only mark the test unhealthy if there // are a minimum of two failures in a row: diff --git a/src/vendor/github.com/docker/distribution/health/health.go b/src/vendor/github.com/docker/distribution/health/health.go index 220282dcd..592bce576 100644 --- a/src/vendor/github.com/docker/distribution/health/health.go +++ b/src/vendor/github.com/docker/distribution/health/health.go @@ -215,7 +215,7 @@ func RegisterFunc(name string, check func() error) { // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker // from an arbitrary func() error. 
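A sketch of registering checks with the package-level helpers shown above, assuming the default registry is exposed on /debug/health as described in the package docs (the check names, probes, and listen address are illustrative):

package main

import (
	"net/http"
	"time"

	"github.com/docker/distribution/health"
)

func main() {
	// A one-off check evaluated on every status request.
	health.RegisterFunc("config", func() error { return nil })

	// A background check polled every 10 seconds; note the checker is now
	// passed straight through without the explicit CheckFunc conversion.
	health.RegisterPeriodicFunc("storage", 10*time.Second, func() error {
		return nil // replace with a real probe
	})

	// The package docs above describe querying the results via
	// GET /debug/health on the default mux.
	http.ListenAndServe(":5001", nil)
}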
func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { - registry.Register(name, PeriodicChecker(CheckFunc(check), period)) + registry.Register(name, PeriodicChecker(check, period)) } // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker @@ -227,7 +227,7 @@ func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { // RegisterPeriodicThresholdFunc allows the convenience of registering a // PeriodicChecker from an arbitrary func() error. func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { - registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) + registry.Register(name, PeriodicThresholdChecker(check, period, threshold)) } // RegisterPeriodicThresholdFunc allows the convenience of registering a diff --git a/src/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/src/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 000000000..f4e915eed --- /dev/null +++ b/src/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,216 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" +) + +const ( + // MediaTypeManifestList specifies the mediaType for manifest lists. + MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" +) + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +// OCISchemaVersion provides a pre-initialized version structure for this +// packages OCIschema version of the manifest. 
+var OCISchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: v1.MediaTypeImageIndex, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + if m.MediaType != MediaTypeManifestList { + err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", + MediaTypeManifestList, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + + imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex { + err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", + v1.MediaTypeImageIndex, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err + } + err = distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. 
+func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + var mediaType string + if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest { + mediaType = v1.MediaTypeImageIndex + } else { + mediaType = MediaTypeManifestList + } + + return FromDescriptorsWithMediaType(descriptors, mediaType) +} + +// FromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly +func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mediaType, + }, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. 
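A sketch of building a list with the new manifestlist package; because the descriptor below (with illustrative size and content) uses the OCI manifest media type, FromDescriptors emits an OCI image index rather than a Docker manifest list:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	descriptors := []manifestlist.ManifestDescriptor{{
		Descriptor: distribution.Descriptor{
			MediaType: v1.MediaTypeImageManifest,
			Size:      1234,
			Digest:    digest.FromString("per-platform manifest body"),
		},
		Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"},
	}}

	ml, err := manifestlist.FromDescriptors(descriptors)
	if err != nil {
		panic(err)
	}

	// Payload returns the selected media type alongside the canonical JSON.
	mediaType, payload, _ := ml.Payload()
	fmt.Println(mediaType, len(payload))
}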
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + var mediaType string + if m.MediaType == "" { + mediaType = v1.MediaTypeImageIndex + } else { + mediaType = m.MediaType + } + + return mediaType, m.canonical, nil +} diff --git a/src/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/src/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go index be0123731..a96dc3d26 100644 --- a/src/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go +++ b/src/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -1,6 +1,7 @@ package schema1 import ( + "context" "crypto/sha512" "encoding/json" "errors" @@ -8,11 +9,10 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" ) type diffID digest.Digest @@ -240,8 +240,13 @@ func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, e // AppendReference adds a reference to the current ManifestBuilder func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { - // todo: verification here? - mb.descriptors = append(mb.descriptors, d.Descriptor()) + descriptor := d.Descriptor() + + if err := descriptor.Digest.Validate(); err != nil { + return err + } + + mb.descriptors = append(mb.descriptors, descriptor) return nil } diff --git a/src/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/src/vendor/github.com/docker/distribution/manifest/schema1/manifest.go index bff47bde0..5a06b54bc 100644 --- a/src/vendor/github.com/docker/distribution/manifest/schema1/manifest.go +++ b/src/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -5,9 +5,9 @@ import ( "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" ) const ( @@ -138,7 +138,7 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error { return nil } -// References returnes the descriptors of this manifests references +// References returns the descriptors of this manifests references func (sm SignedManifest) References() []distribution.Descriptor { dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) for i, fsLayer := range sm.FSLayers { diff --git a/src/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/src/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go index fc1045f9e..a4f6032cd 100644 --- a/src/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go +++ b/src/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -1,15 +1,15 @@ package schema1 import ( + "context" + "errors" "fmt" - "errors" "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" ) // referenceManifestBuilder is a type for constructing manifests from schema1 diff --git a/src/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/src/vendor/github.com/docker/distribution/manifest/schema1/verify.go index fa8daa56f..ef59065cd 100644 --- 
a/src/vendor/github.com/docker/distribution/manifest/schema1/verify.go +++ b/src/vendor/github.com/docker/distribution/manifest/schema1/verify.go @@ -3,8 +3,8 @@ package schema1 import ( "crypto/x509" - "github.com/Sirupsen/logrus" "github.com/docker/libtrust" + "github.com/sirupsen/logrus" ) // Verify verifies the signature of the signed manifest returning the public diff --git a/src/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/src/vendor/github.com/docker/distribution/manifest/schema2/builder.go index ec0bf858d..3facaae62 100644 --- a/src/vendor/github.com/docker/distribution/manifest/schema2/builder.go +++ b/src/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -1,9 +1,10 @@ package schema2 import ( + "context" + "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) // builder is a type for constructing manifests. @@ -11,21 +12,25 @@ type builder struct { // bs is a BlobService used to publish the configuration blob. bs distribution.BlobService + // configMediaType is media type used to describe configuration + configMediaType string + // configJSON references configJSON []byte - // layers is a list of layer descriptors that gets built by successive - // calls to AppendReference. - layers []distribution.Descriptor + // dependencies is a list of descriptors that gets built by successive + // calls to AppendReference. In case of image configuration these are layers. + dependencies []distribution.Descriptor } // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob // as part of the Build process. -func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { +func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder { mb := &builder{ - bs: bs, - configJSON: make([]byte, len(configJSON)), + bs: bs, + configMediaType: configMediaType, + configJSON: make([]byte, len(configJSON)), } copy(mb.configJSON, configJSON) @@ -36,9 +41,9 @@ func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribu func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ Versioned: SchemaVersion, - Layers: make([]distribution.Descriptor, len(mb.layers)), + Layers: make([]distribution.Descriptor, len(mb.dependencies)), } - copy(m.Layers, mb.layers) + copy(m.Layers, mb.dependencies) configDigest := digest.FromBytes(mb.configJSON) @@ -48,7 +53,7 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { case nil: // Override MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = MediaTypeConfig + m.Config.MediaType = mb.configMediaType return FromStruct(m) case distribution.ErrBlobUnknown: // nop @@ -57,10 +62,10 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { } // Add config to the blob store - m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON) // Override MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. 
- m.Config.MediaType = MediaTypeConfig + m.Config.MediaType = mb.configMediaType if err != nil { return nil, err } @@ -70,11 +75,11 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { // AppendReference adds a reference to the current ManifestBuilder. func (mb *builder) AppendReference(d distribution.Describable) error { - mb.layers = append(mb.layers, d.Descriptor()) + mb.dependencies = append(mb.dependencies, d.Descriptor()) return nil } // References returns the current references added to this builder. func (mb *builder) References() []distribution.Descriptor { - return mb.layers + return mb.dependencies } diff --git a/src/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/src/vendor/github.com/docker/distribution/manifest/schema2/manifest.go index 741998d04..ee29438fe 100644 --- a/src/vendor/github.com/docker/distribution/manifest/schema2/manifest.go +++ b/src/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -6,16 +6,16 @@ import ( "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" ) const ( // MediaTypeManifest specifies the mediaType for the current version. MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - // MediaTypeConfig specifies the mediaType for the image configuration. - MediaTypeConfig = "application/vnd.docker.container.image.v1+json" + // MediaTypeImageConfig specifies the mediaType for the image configuration. + MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" // MediaTypePluginConfig specifies the mediaType for plugin configuration. MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" @@ -27,6 +27,10 @@ const ( // MediaTypeForeignLayer is the mediaType used for layers that must be // downloaded from foreign URLs. MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + + // MediaTypeUncompressedLayer is the mediaType used for layers which + // are not compressed. + MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" ) var ( @@ -67,7 +71,7 @@ type Manifest struct { Layers []distribution.Descriptor `json:"layers"` } -// References returnes the descriptors of this manifests references. +// References returns the descriptors of this manifests references. func (m Manifest) References() []distribution.Descriptor { references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) references = append(references, m.Config) @@ -75,7 +79,7 @@ func (m Manifest) References() []distribution.Descriptor { return references } -// Target returns the target of this signed manifest. +// Target returns the target of this manifest. 
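A sketch of the updated builder call path: NewManifestBuilder now takes the config media type explicitly, so an image manifest is built as below (the wrapper function name is hypothetical):

package main

import (
	"context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
)

// buildImageManifest is a hypothetical helper wrapping the new signature.
func buildImageManifest(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	builder := schema2.NewManifestBuilder(bs, schema2.MediaTypeImageConfig, configJSON)
	for _, layer := range layers {
		// distribution.Descriptor satisfies Describable, so layers can be
		// appended directly.
		if err := builder.AppendReference(layer); err != nil {
			return nil, err
		}
	}
	return builder.Build(ctx)
}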
func (m Manifest) Target() distribution.Descriptor { return m.Config } @@ -112,6 +116,12 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { return err } + if manifest.MediaType != MediaTypeManifest { + return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", + MediaTypeManifest, manifest.MediaType) + + } + m.Manifest = manifest return nil diff --git a/src/vendor/github.com/docker/distribution/manifests.go b/src/vendor/github.com/docker/distribution/manifests.go index c4fb63450..1816baea1 100644 --- a/src/vendor/github.com/docker/distribution/manifests.go +++ b/src/vendor/github.com/docker/distribution/manifests.go @@ -1,11 +1,11 @@ package distribution import ( + "context" "fmt" "mime" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) // Manifest represents a registry object specifying a set of @@ -22,8 +22,8 @@ type Manifest interface { References() []Descriptor // Payload provides the serialized format of the manifest, in addition to - // the mediatype. - Payload() (mediatype string, payload []byte, err error) + // the media type. + Payload() (mediaType string, payload []byte, err error) } // ManifestBuilder creates a manifest allowing one to include dependencies. @@ -94,20 +94,20 @@ var mappings = make(map[string]UnmarshalFunc, 0) func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. - var mediatype string + var mediaType string if ctHeader != "" { var err error - mediatype, _, err = mime.ParseMediaType(ctHeader) + mediaType, _, err = mime.ParseMediaType(ctHeader) if err != nil { return nil, Descriptor{}, err } } - unmarshalFunc, ok := mappings[mediatype] + unmarshalFunc, ok := mappings[mediaType] if !ok { unmarshalFunc, ok = mappings[""] if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) } } @@ -116,10 +116,10 @@ func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) // RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This // should be called from specific -func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { - if _, ok := mappings[mediatype]; ok { - return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) +func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { + if _, ok := mappings[mediaType]; ok { + return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) } - mappings[mediatype] = u + mappings[mediaType] = u return nil } diff --git a/src/vendor/github.com/docker/distribution/reference/helpers.go b/src/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 000000000..978df7eab --- /dev/null +++ b/src/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. 
+func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/src/vendor/github.com/docker/distribution/reference/normalize.go b/src/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 000000000..2d71fc5e9 --- /dev/null +++ b/src/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,170 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. 
Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/src/vendor/github.com/docker/distribution/reference/reference.go b/src/vendor/github.com/docker/distribution/reference/reference.go index 02786628e..2f66cca87 100644 --- a/src/vendor/github.com/docker/distribution/reference/reference.go +++ b/src/vendor/github.com/docker/distribution/reference/reference.go @@ -4,30 +4,32 @@ // Grammar // // reference := name [ ":" tag ] [ "@" digest ] -// name := [hostname '/'] component ['/' component]* -// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' 
domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ -// component := alpha-numeric [separator alpha-numeric]* +// path-component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ package reference import ( "errors" "fmt" - "path" "strings" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) const ( @@ -53,6 +55,9 @@ var ( // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") ) // Reference is an opaque object reference identifier that may include @@ -126,23 +131,56 @@ type Digested interface { } // Canonical reference is an object with a fully unique -// name including a name with hostname and digest +// name including a name with domain and digest type Canonical interface { Named Digest() digest.Digest } +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + // SplitHostname splits a named reference into a // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as name +// DEPRECATED: Use Domain or Path func SplitHostname(named Named) (string, string) { - name := named.Name() - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() } - return match[1], match[2] + return splitDomain(named.Name()) } // Parse parses s and returns a syntactically valid Reference. 
@@ -164,13 +202,24 @@ func Parse(s string) (Reference, error) { return nil, ErrNameTooLong } + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + ref := reference{ - name: matches[1], - tag: matches[2], + namedRepository: repo, + tag: matches[2], } if matches[3] != "" { var err error - ref.digest, err = digest.ParseDigest(matches[3]) + ref.digest, err = digest.Parse(matches[3]) if err != nil { return nil, err } @@ -185,18 +234,17 @@ func Parse(s string) (Reference, error) { } // ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. // If an error was encountered it is returned, along with a nil Reference. // NOTE: ParseNamed will not handle short digests. func ParseNamed(s string) (Named, error) { - ref, err := Parse(s) + named, err := ParseNormalizedNamed(s) if err != nil { return nil, err } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) + if named.String() != s { + return nil, ErrNameNotCanonical } return named, nil } @@ -207,10 +255,15 @@ func WithName(name string) (Named, error) { if len(name) > NameTotalLengthMax { return nil, ErrNameTooLong } - if !anchoredNameRegexp.MatchString(name) { + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { return nil, ErrReferenceInvalidFormat } - return repository(name), nil + return repository{ + domain: match[1], + path: match[2], + }, nil } // WithTag combines the name from "name" and the tag from "tag" to form a @@ -219,16 +272,23 @@ func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } if canonical, ok := name.(Canonical); ok { return reference{ - name: name.Name(), - tag: tag, - digest: canonical.Digest(), + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), }, nil } return taggedReference{ - name: name.Name(), - tag: tag, + namedRepository: repo, + tag: tag, }, nil } @@ -238,36 +298,37 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } if tagged, ok := name.(Tagged); ok { return reference{ - name: name.Name(), - tag: tagged.Tag(), - digest: digest, + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, }, nil } return canonicalReference{ - name: name.Name(), - digest: digest, + namedRepository: repo, + digest: digest, }, nil } -// Match reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. 
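A sketch of the normalization helpers added alongside the stricter ParseNamed (the image name is illustrative):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// ParseNormalizedNamed expands familiar UI names to fully qualified ones.
	named, err := reference.ParseNormalizedNamed("ubuntu:18.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/ubuntu:18.04
	fmt.Println(reference.Domain(named))         // docker.io
	fmt.Println(reference.Path(named))           // library/ubuntu
	fmt.Println(reference.FamiliarString(named)) // ubuntu:18.04

	// ParseNamed now rejects anything that is not already canonical.
	if _, err := reference.ParseNamed("ubuntu"); err != nil {
		fmt.Println(err) // repository name must be canonical
	}
}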
-func Match(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, ref.String()) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, namedRef.Name()) - } - return matched, err -} - // TrimNamed removes any tag or digest from the named reference. func TrimNamed(ref Named) Named { - return repository(ref.Name()) + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } } func getBestReferenceType(ref reference) Reference { - if ref.name == "" { + if ref.Name() == "" { // Allow digest only references if ref.digest != "" { return digestReference(ref.digest) @@ -277,16 +338,16 @@ func getBestReferenceType(ref reference) Reference { if ref.tag == "" { if ref.digest != "" { return canonicalReference{ - name: ref.name, - digest: ref.digest, + namedRepository: ref.namedRepository, + digest: ref.digest, } } - return repository(ref.name) + return ref.namedRepository } if ref.digest == "" { return taggedReference{ - name: ref.name, - tag: ref.tag, + namedRepository: ref.namedRepository, + tag: ref.tag, } } @@ -294,17 +355,13 @@ func getBestReferenceType(ref reference) Reference { } type reference struct { - name string + namedRepository tag string digest digest.Digest } func (r reference) String() string { - return r.name + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Name() string { - return r.name + return r.Name() + ":" + r.tag + "@" + r.digest.String() } func (r reference) Tag() string { @@ -315,20 +372,34 @@ func (r reference) Digest() digest.Digest { return r.digest } -type repository string +type repository struct { + domain string + path string +} func (r repository) String() string { - return string(r) + return r.Name() } func (r repository) Name() string { - return string(r) + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path } type digestReference digest.Digest func (d digestReference) String() string { - return d.String() + return digest.Digest(d).String() } func (d digestReference) Digest() digest.Digest { @@ -336,16 +407,12 @@ func (d digestReference) Digest() digest.Digest { } type taggedReference struct { - name string - tag string + namedRepository + tag string } func (t taggedReference) String() string { - return t.name + ":" + t.tag -} - -func (t taggedReference) Name() string { - return t.name + return t.Name() + ":" + t.tag } func (t taggedReference) Tag() string { @@ -353,16 +420,12 @@ func (t taggedReference) Tag() string { } type canonicalReference struct { - name string + namedRepository digest digest.Digest } func (c canonicalReference) String() string { - return c.name + "@" + c.digest.String() -} - -func (c canonicalReference) Name() string { - return c.name + return c.Name() + "@" + c.digest.String() } func (c canonicalReference) Digest() digest.Digest { diff --git a/src/vendor/github.com/docker/distribution/reference/regexp.go b/src/vendor/github.com/docker/distribution/reference/regexp.go index 9a7d366bc..786034932 100644 --- a/src/vendor/github.com/docker/distribution/reference/regexp.go +++ b/src/vendor/github.com/docker/distribution/reference/regexp.go @@ -19,18 +19,18 @@ var ( alphaNumericRegexp, optional(repeated(separatorRegexp, alphaNumericRegexp))) - // hostnameComponentRegexp restricts the registry hostname component of a - // repository name to start with a component as defined by hostnameRegexp 
+ // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - // hostnameRegexp defines the structure of potential hostname components + // DomainRegexp defines the structure of potential domain components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. - hostnameRegexp = expression( - hostnameComponentRegexp, - optional(repeated(literal(`.`), hostnameComponentRegexp)), + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. @@ -48,17 +48,17 @@ var ( anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the hostname and name part omitting + // regexp has capturing groups for the domain and name part omitting // the separating forward slash from either. NameRegexp = expression( - optional(hostnameRegexp, literal(`/`)), + optional(DomainRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the - // hostname and trailing components. + // domain and trailing components. anchoredNameRegexp = anchored( - optional(capture(hostnameRegexp), literal(`/`)), + optional(capture(DomainRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) @@ -68,6 +68,25 @@ var ( ReferenceRegexp = anchored(capture(NameRegexp), optional(literal(":"), capture(TagRegexp)), optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) ) // match compiles the string to a regular expression. diff --git a/src/vendor/github.com/docker/distribution/registry.go b/src/vendor/github.com/docker/distribution/registry.go index 1ede31ebb..6c3210989 100644 --- a/src/vendor/github.com/docker/distribution/registry.go +++ b/src/vendor/github.com/docker/distribution/registry.go @@ -1,7 +1,8 @@ package distribution import ( - "github.com/docker/distribution/context" + "context" + "github.com/docker/distribution/reference" ) @@ -35,7 +36,7 @@ type Namespace interface { // reference. 
Repository(ctx context.Context, name reference.Named) (Repository, error) - // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries // which were filled. 'last' contains an offset in the catalog, and 'err' will be // set to io.EOF if there are no more entries to obtain. @@ -53,6 +54,11 @@ type RepositoryEnumerator interface { Enumerate(ctx context.Context, ingester func(string) error) error } +// RepositoryRemover removes given repository +type RepositoryRemover interface { + Remove(ctx context.Context, name reference.Named) error +} + // ManifestServiceOption is a function argument for Manifest Service methods type ManifestServiceOption interface { Apply(ManifestService) error @@ -72,6 +78,21 @@ func (o WithTagOption) Apply(m ManifestService) error { return nil } +// WithManifestMediaTypes lists the media types the client wishes +// the server to provide. +func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { + return WithManifestMediaTypesOption{mediaTypes} +} + +// WithManifestMediaTypesOption holds a list of accepted media types +type WithManifestMediaTypesOption struct{ MediaTypes []string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { + // no implementation + return nil +} + // Repository is a named collection of manifests and layers. type Repository interface { // Named returns the name of the repository. diff --git a/src/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/src/vendor/github.com/docker/distribution/registry/api/errcode/handler.go index 49a64a86e..d77e70473 100644 --- a/src/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ b/src/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -36,9 +36,5 @@ func ServeJSON(w http.ResponseWriter, err error) error { w.WriteHeader(sc) - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil + return json.NewEncoder(w).Encode(err) } diff --git a/src/vendor/github.com/docker/distribution/registry/auth/auth.go b/src/vendor/github.com/docker/distribution/registry/auth/auth.go index 1c9af8821..835eff73d 100644 --- a/src/vendor/github.com/docker/distribution/registry/auth/auth.go +++ b/src/vendor/github.com/docker/distribution/registry/auth/auth.go @@ -21,7 +21,7 @@ // if ctx, err := accessController.Authorized(ctx, access); err != nil { // if challenge, ok := err.(auth.Challenge) { // // Let the challenge write the response. -// challenge.SetHeaders(w) +// challenge.SetHeaders(r, w) // w.WriteHeader(http.StatusUnauthorized) // return // } else { @@ -33,11 +33,10 @@ package auth import ( + "context" "errors" "fmt" "net/http" - - "github.com/docker/distribution/context" ) const ( @@ -88,7 +87,7 @@ type Challenge interface { // adding the an HTTP challenge header on the response message. Callers // are expected to set the appropriate HTTP status code (e.g. 401) // themselves. 
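The new `WithManifestMediaTypes` option introduced in registry.go above lets a client state which manifest media types it will accept when fetching by digest. A hedged sketch of how a caller might pass it to a `ManifestService`; the helper name and the media-type strings are illustrative, not taken from this patch:

```go
package example

import (
	"context"

	"github.com/docker/distribution"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// fetchManifest is a hypothetical helper: it asks the registry for either a
// Docker schema2 manifest or an OCI image manifest for the given digest.
func fetchManifest(ctx context.Context, ms distribution.ManifestService, dgst digest.Digest) (distribution.Manifest, error) {
	return ms.Get(ctx, dgst, distribution.WithManifestMediaTypes([]string{
		"application/vnd.docker.distribution.manifest.v2+json",
		v1.MediaTypeImageManifest, // OCI manifest type, vendored later in this patch
	}))
}
```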
- SetHeaders(w http.ResponseWriter) + SetHeaders(r *http.Request, w http.ResponseWriter) } // AccessController controls access to registry resources based on a request diff --git a/src/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/src/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go index 4e8b7f1ce..fa924f0be 100644 --- a/src/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go +++ b/src/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -1,6 +1,7 @@ package token import ( + "context" "crypto" "crypto/x509" "encoding/pem" @@ -11,7 +12,7 @@ import ( "os" "strings" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" "github.com/docker/libtrust" ) @@ -75,10 +76,11 @@ var ( // authChallenge implements the auth.Challenge interface. type authChallenge struct { - err error - realm string - service string - accessSet accessSet + err error + realm string + autoRedirect bool + service string + accessSet accessSet } var _ auth.Challenge = authChallenge{} @@ -96,8 +98,14 @@ func (ac authChallenge) Status() int { // challengeParams constructs the value to be used in // the WWW-Authenticate response challenge header. // See https://tools.ietf.org/html/rfc6750#section-3 -func (ac authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) +func (ac authChallenge) challengeParams(r *http.Request) string { + var realm string + if ac.autoRedirect { + realm = fmt.Sprintf("https://%s/auth/token", r.Host) + } else { + realm = ac.realm + } + str := fmt.Sprintf("Bearer realm=%q,service=%q", realm, ac.service) if scope := ac.accessSet.scopeParam(); scope != "" { str = fmt.Sprintf("%s,scope=%q", str, scope) @@ -113,23 +121,25 @@ func (ac authChallenge) challengeParams() string { } // SetChallenge sets the WWW-Authenticate value for the response. -func (ac authChallenge) SetHeaders(w http.ResponseWriter) { - w.Header().Add("WWW-Authenticate", ac.challengeParams()) +func (ac authChallenge) SetHeaders(r *http.Request, w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams(r)) } // accessController implements the auth.AccessController interface. type accessController struct { - realm string - issuer string - service string - rootCerts *x509.CertPool - trustedKeys map[string]libtrust.PublicKey + realm string + autoRedirect bool + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey } // tokenAccessOptions is a convenience type for handling // options to the contstructor of an accessController. 
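Two behavioural changes land here: `Challenge.SetHeaders` now receives the request, and the token access controller gains an `autoredirect` option that derives the realm from `r.Host` instead of a fixed URL. A sketch of the call-site change for code that embeds the auth package; the handler shape is illustrative:

```go
package example

import (
	"net/http"

	"github.com/docker/distribution/registry/auth"
)

// writeAuthError shows the updated Challenge.SetHeaders signature; before
// this upgrade the call was challenge.SetHeaders(w).
func writeAuthError(w http.ResponseWriter, r *http.Request, err error) {
	if challenge, ok := err.(auth.Challenge); ok {
		// With the token backend's autoredirect option enabled, the realm in
		// the WWW-Authenticate header is built from r.Host.
		challenge.SetHeaders(r, w)
		w.WriteHeader(http.StatusUnauthorized)
		return
	}
	http.Error(w, err.Error(), http.StatusInternalServerError)
}
```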
type tokenAccessOptions struct { realm string + autoRedirect bool issuer string service string rootCertBundle string @@ -152,6 +162,15 @@ func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + autoRedirectVal, ok := options["autoredirect"] + if ok { + autoRedirect, ok := autoRedirectVal.(bool) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect") + } + opts.autoRedirect = autoRedirect + } + return opts, nil } @@ -204,11 +223,12 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, } return &accessController{ - realm: config.realm, - issuer: config.issuer, - service: config.service, - rootCerts: rootPool, - trustedKeys: trustedKeys, + realm: config.realm, + autoRedirect: config.autoRedirect, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, }, nil } @@ -216,12 +236,13 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, // for actions on resources described by the given access items. func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - accessSet: newAccessSet(accessItems...), + realm: ac.realm, + autoRedirect: ac.autoRedirect, + service: ac.service, + accessSet: newAccessSet(accessItems...), } - req, err := context.GetRequest(ctx) + req, err := dcontext.GetRequest(ctx) if err != nil { return nil, err } diff --git a/src/vendor/github.com/docker/distribution/registry/auth/token/token.go b/src/vendor/github.com/docker/distribution/registry/auth/token/token.go index 850f5813f..7f87d496f 100644 --- a/src/vendor/github.com/docker/distribution/registry/auth/token/token.go +++ b/src/vendor/github.com/docker/distribution/registry/auth/token/token.go @@ -10,8 +10,8 @@ import ( "strings" "time" - log "github.com/Sirupsen/logrus" "github.com/docker/libtrust" + log "github.com/sirupsen/logrus" "github.com/docker/distribution/registry/auth" ) diff --git a/src/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/src/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go index c9bdfc355..6e3f1ccc4 100644 --- a/src/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ b/src/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -45,13 +45,13 @@ type Manager interface { // to a backend. 
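The next hunk fixes the `Challanges` → `Challenges` field name inside `simpleManager`; the manager itself is what registry clients use to cache WWW-Authenticate challenges per endpoint. A short usage sketch, with a hypothetical registry URL:

```go
package example

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

// recordChallenges probes a (hypothetical) registry endpoint and caches the
// WWW-Authenticate challenges returned with its 401 response.
// Usage: recordChallenges(challenge.NewSimpleManager())
func recordChallenges(cm challenge.Manager) error {
	resp, err := http.Get("https://registry.example.com/v2/")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if err := cm.AddResponse(resp); err != nil {
		return err
	}

	u, _ := url.Parse("https://registry.example.com/v2/")
	challenges, err := cm.GetChallenges(*u)
	if err != nil {
		return err
	}
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters["realm"])
	}
	return nil
}
```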
func NewSimpleManager() Manager { return &simpleManager{ - Challanges: make(map[string][]Challenge), + Challenges: make(map[string][]Challenge), } } type simpleManager struct { sync.RWMutex - Challanges map[string][]Challenge + Challenges map[string][]Challenge } func normalizeURL(endpoint *url.URL) { @@ -64,7 +64,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { m.RLock() defer m.RUnlock() - challenges := m.Challanges[endpoint.String()] + challenges := m.Challenges[endpoint.String()] return challenges, nil } @@ -82,7 +82,7 @@ func (m *simpleManager) AddResponse(resp *http.Response) error { m.Lock() defer m.Unlock() - m.Challanges[urlCopy.String()] = challenges + m.Challenges[urlCopy.String()] = challenges return nil } diff --git a/src/vendor/github.com/docker/distribution/tags.go b/src/vendor/github.com/docker/distribution/tags.go index 503056596..f22df2b85 100644 --- a/src/vendor/github.com/docker/distribution/tags.go +++ b/src/vendor/github.com/docker/distribution/tags.go @@ -1,7 +1,7 @@ package distribution import ( - "github.com/docker/distribution/context" + "context" ) // TagService provides access to information about tagged objects. diff --git a/src/vendor/github.com/docker/distribution/vendor.conf b/src/vendor/github.com/docker/distribution/vendor.conf new file mode 100644 index 000000000..a249caf26 --- /dev/null +++ b/src/vendor/github.com/docker/distribution/vendor.conf @@ -0,0 +1,51 @@ +github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b +github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f +github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 +github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 
+github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 v2.2.1 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/src/vendor/github.com/docker/notary/NOTARY_VERSION b/src/vendor/github.com/docker/notary/NOTARY_VERSION deleted file mode 100644 index 8f0916f76..000000000 --- a/src/vendor/github.com/docker/notary/NOTARY_VERSION +++ /dev/null @@ -1 +0,0 @@ -0.5.0 diff --git a/src/vendor/github.com/docker/notary/server.minimal.Dockerfile b/src/vendor/github.com/docker/notary/server.minimal.Dockerfile deleted file mode 100644 index 1b92b6eed..000000000 --- a/src/vendor/github.com/docker/notary/server.minimal.Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM busybox:latest -MAINTAINER David Lawrence "david.lawrence@docker.com" - -# the ln is for compatibility with the docker-compose.yml, making these -# images a straight swap for the those built in the compose file. -RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env - -COPY ./bin/notary-server /usr/bin/notary-server -COPY ./bin/migrate /usr/bin/migrate -COPY ./bin/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1 -COPY ./fixtures /var/lib/notary/fixtures -COPY ./migrations /var/lib/notary/migrations - -WORKDIR /var/lib/notary -ENV SERVICE_NAME=notary_server -EXPOSE 4443 - -ENTRYPOINT [ "/usr/bin/notary-server" ] -CMD [ "-config=/var/lib/notary/fixtures/server-config-local.json" ] diff --git a/src/vendor/github.com/docker/notary/signer.minimal.Dockerfile b/src/vendor/github.com/docker/notary/signer.minimal.Dockerfile deleted file mode 100644 index cb4033dc8..000000000 --- a/src/vendor/github.com/docker/notary/signer.minimal.Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM busybox:latest -MAINTAINER David Lawrence "david.lawrence@docker.com" - -# the ln is for compatibility with the docker-compose.yml, making these -# images a straight swap for the those built in the compose file. 
-RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env - -COPY ./bin/notary-signer /usr/bin/notary-signer -COPY ./bin/migrate /usr/bin/migrate -COPY ./bin/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1 -COPY ./fixtures /var/lib/notary/fixtures -COPY ./migrations /var/lib/notary/migrations - -WORKDIR /var/lib/notary -ENV SERVICE_NAME=notary_signer -ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1" -ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword" - -ENTRYPOINT [ "/usr/bin/notary-signer" ] -CMD [ "-config=/var/lib/notary/fixtures/signer-config-local.json" ] diff --git a/src/vendor/github.com/docker/notary/vendor.conf b/src/vendor/github.com/docker/notary/vendor.conf deleted file mode 100644 index f949db07e..000000000 --- a/src/vendor/github.com/docker/notary/vendor.conf +++ /dev/null @@ -1,30 +0,0 @@ -github.com/Shopify/logrus-bugsnag 5a46080c635f13e8b60c24765c19d62e1ca8d0fb -github.com/Sirupsen/logrus 6d9ae300aaf85d6acd2e5424081c7fcddb21dab8 -github.com/agl/ed25519 278e1ec8e8a6e017cd07577924d6766039146ced -github.com/bugsnag/bugsnag-go 13fd6b8acda029830ef9904df6b63be0a83369d0 -github.com/coreos/etcd 6acb3d67fbe131b3b2d5d010e00ec80182be4628 -github.com/docker/distribution v2.6.0 -github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb -github.com/docker/go/canonical d30aec9fd63c35133f8f79c3412ad91a3b08be06 -github.com/dvsekhvalnov/jose2go v1.2 -github.com/go-sql-driver/mysql 0cc29e9fe8e25c2c58cf47bcab566e029bbaa88b -github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527 -github.com/gorilla/mux e444e69cbd2e2e3e0749a2f3c717cec491552bbf -github.com/jinzhu/gorm 82d726bbfd8cefbe2dcdc7f7f0484551c0d40433 -github.com/lib/pq 0dad96c0b94f8dee039aa40467f767467392a0af -github.com/mattn/go-sqlite3 v1.0.0 -github.com/miekg/pkcs11 ba39b9c6300b7e0be41b115330145ef8afdff7d6 -github.com/mitchellh/go-homedir df55a15e5ce646808815381b3db47a8c66ea62f4 -github.com/prometheus/client_golang 449ccefff16c8e2b7229f6be1921ba22f62461fe -github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527 -github.com/spf13/cobra f368244301305f414206f889b1735a54cfc8bde8 -github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e -github.com/stretchr/testify 089c7181b8c728499929ff09b62d3fdd8df8adff -golang.org/x/crypto 5bcd134fee4dd1475da17714aac19c0aa0142e2f -golang.org/x/net 6a513affb38dc9788b449d59ffed099b8de18fa0 -google.golang.org/grpc v1.0.5 - -gopkg.in/dancannon/gorethink.v3 v3.0.0 -# dependencies of gorethink.v3 -gopkg.in/gorethink/gorethink.v2 v2.2.2 -github.com/cenk/backoff v1.0.0 diff --git a/src/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE new file mode 100644 index 000000000..14127cd83 --- /dev/null +++ b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE @@ -0,0 +1,9 @@ +(The MIT License) + +Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md new file mode 100644 index 000000000..195333e51 --- /dev/null +++ b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md @@ -0,0 +1,41 @@ +# Windows Terminal Sequences + +This library allow for enabling Windows terminal color support for Go. + +See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details. + +## Usage + +```go +import ( + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func main() { + sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true) +} + +``` + +## Authors + +The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de). + +We thank all the authors who provided code to this library: + +* Felix Kollmann +* Nicolas Perraut + +## License + +(The MIT License) + +Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod new file mode 100644 index 000000000..716c61312 --- /dev/null +++ b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod @@ -0,0 +1 @@ +module github.com/konsorten/go-windows-terminal-sequences diff --git a/src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go new file mode 100644 index 000000000..ef18d8f97 --- /dev/null +++ b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go @@ -0,0 +1,36 @@ +// +build windows + +package sequences + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll") + setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode") +) + +func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { + const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4 + + var mode uint32 + err := syscall.GetConsoleMode(syscall.Stdout, &mode) + if err != nil { + return err + } + + if enable { + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } + + ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode)) + if ret == 0 { + return err + } + + return nil +} diff --git a/src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go new file mode 100644 index 000000000..df61a6f2f --- /dev/null +++ b/src/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go @@ -0,0 +1,11 @@ +// +build linux darwin + +package sequences + +import ( + "fmt" +) + +func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error { + return fmt.Errorf("windows only package") +} diff --git a/src/vendor/github.com/opencontainers/image-spec/LICENSE b/src/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 000000000..9fdc20fdb --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 000000000..35d810895 --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. 
+ AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" +) diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go new file mode 100644 index 000000000..fe799bd69 --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -0,0 +1,103 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. 
+ Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories describing where the process is likely write data specific to a container instance. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. + Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` +} + +// RootFS describes a layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. + DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. +type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Architecture is the CPU architecture which the binaries in this image are built to run on. + Architecture string `json:"architecture"` + + // OS is the name of the operating system which the image is built to run on. + OS string `json:"os"` + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 000000000..6e442a085 --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
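The `Image`/`ImageConfig`/`RootFS`/`History` types above mirror the `application/vnd.oci.image.config.v1+json` payload, so a config blob pulled from a registry can be decoded straight into them. A minimal sketch using a truncated, hypothetical blob:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A truncated image config blob, as a registry might return it.
	blob := []byte(`{
		"architecture": "amd64",
		"os": "linux",
		"config": {"Env": ["PATH=/usr/local/sbin:/usr/local/bin"], "Cmd": ["/bin/sh"]},
		"rootfs": {"type": "layers", "diff_ids": ["sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"]}
	}`)

	var img v1.Image
	if err := json.Unmarshal(blob, &img); err != nil {
		panic(err)
	}
	fmt.Println(img.Architecture, img.OS, img.Config.Cmd, len(img.RootFS.DiffIDs))
}
```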
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 000000000..4e6c4b236 --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. 
+type Index struct { + specs.Versioned + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 000000000..fc79e9e0d --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 000000000..7ff32c40b --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. 
+ Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 000000000..bad7bb97f --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by + // the manifest but with distribution restrictions. + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + // MediaTypeImageLayerNonDistributableGzip is the media type for + // gzipped layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" +) diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/version.go new file mode 100644 index 000000000..5d493df23 --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
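Putting the vendored descriptor, manifest, annotation, and media-type definitions together, a minimal OCI image manifest can be built and marshalled as below. The config payload is a placeholder, and `digest.FromBytes` comes from the already-vendored go-digest package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cfg := []byte(`{"architecture":"amd64","os":"linux","rootfs":{"type":"layers","diff_ids":[]}}`)

	m := v1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
			Digest:    digest.FromBytes(cfg),
			Size:      int64(len(cfg)),
		},
		Layers: []v1.Descriptor{}, // no layers in this placeholder image
		Annotations: map[string]string{
			v1.AnnotationTitle: "example image", // standard key from annotations.go
		},
	}

	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out))
}
```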
+ +package specs + +import "fmt" + +const ( + // VersionMajor is for an API incompatible changes + VersionMajor = 1 + // VersionMinor is for functionality in a backwards-compatible manner + VersionMinor = 0 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 1 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go new file mode 100644 index 000000000..58a1510f3 --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/src/vendor/github.com/Sirupsen/logrus/.gitignore b/src/vendor/github.com/sirupsen/logrus/.gitignore similarity index 50% rename from src/vendor/github.com/Sirupsen/logrus/.gitignore rename to src/vendor/github.com/sirupsen/logrus/.gitignore index 66be63a00..6b7d7d1e8 100644 --- a/src/vendor/github.com/Sirupsen/logrus/.gitignore +++ b/src/vendor/github.com/sirupsen/logrus/.gitignore @@ -1 +1,2 @@ logrus +vendor diff --git a/src/vendor/github.com/sirupsen/logrus/.travis.yml b/src/vendor/github.com/sirupsen/logrus/.travis.yml new file mode 100644 index 000000000..7e54dc6e3 --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/.travis.yml @@ -0,0 +1,21 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on + - GO111MODULE=off +go: [ 1.10.x, 1.11.x, 1.12.x ] +os: [ linux, osx, windows ] +matrix: + exclude: + - env: GO111MODULE=on + go: 1.10.x +install: + - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi + - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi +script: + - export GOMAXPROCS=4 + - export GORACE=halt_on_error=1 + - go test -race -v ./... + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... 
; fi diff --git a/src/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/src/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..f62cbd24a --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,198 @@ +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. 
+ * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 
0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/src/vendor/github.com/Sirupsen/logrus/LICENSE b/src/vendor/github.com/sirupsen/logrus/LICENSE similarity index 100% rename from src/vendor/github.com/Sirupsen/logrus/LICENSE rename to src/vendor/github.com/sirupsen/logrus/LICENSE diff --git a/src/vendor/github.com/Sirupsen/logrus/README.md b/src/vendor/github.com/sirupsen/logrus/README.md similarity index 71% rename from src/vendor/github.com/Sirupsen/logrus/README.md rename to src/vendor/github.com/sirupsen/logrus/README.md index f77819b16..a4796eb07 100644 --- a/src/vendor/github.com/Sirupsen/logrus/README.md +++ b/src/vendor/github.com/sirupsen/logrus/README.md @@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 ``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + #### Case-sensitivity @@ -241,66 +272,15 @@ func init() { ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). -| Hook | Description | -| ----- | ----------- | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. 
| -| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/) -| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage| -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka | -| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) | -| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github 
via octokit | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | -| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics | -| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. | -| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) | -| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | -| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | +A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + #### Level logging -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go +log.Trace("Something very low level.") log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") @@ -372,6 +352,8 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true`. For Windows, see [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. 
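
With the hook table above replaced by a pointer to the wiki, a small self-contained example may still help; this sketch assumes the standard logrus Hook interface (Levels() []Level, Fire(*Entry) error) declared in hooks.go, which this patch only renames, and the counter hook itself is purely illustrative:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

// warnCounter counts every entry fired at Warn level or above.
type warnCounter struct{ count int }

func (h *warnCounter) Levels() []log.Level {
	return []log.Level{log.PanicLevel, log.FatalLevel, log.ErrorLevel, log.WarnLevel}
}

func (h *warnCounter) Fire(e *log.Entry) error {
	h.count++
	return nil
}

func main() {
	hook := &warnCounter{}
	log.AddHook(hook)

	log.Warn("disk space low")
	log.Info("not counted")
	log.Warnf("warnings so far: %d", hook.count)
}
```
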
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). @@ -379,9 +361,11 @@ The built-in logging formatters are: Third party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -495,7 +479,7 @@ logrus.RegisterExitHandler(handler) #### Thread safety -By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. Situation when locking is not needed includes: diff --git a/src/vendor/github.com/Sirupsen/logrus/alt_exit.go b/src/vendor/github.com/sirupsen/logrus/alt_exit.go similarity index 74% rename from src/vendor/github.com/Sirupsen/logrus/alt_exit.go rename to src/vendor/github.com/sirupsen/logrus/alt_exit.go index 8af90637a..8fd189e1c 100644 --- a/src/vendor/github.com/Sirupsen/logrus/alt_exit.go +++ b/src/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -51,9 +51,9 @@ func Exit(code int) { os.Exit(code) } -// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke -// all handlers. The handlers will also be invoked when any Fatal log entry is -// made. +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. // // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. An example usecase could be @@ -62,3 +62,15 @@ func Exit(code int) { func RegisterExitHandler(handler func()) { handlers = append(handlers, handler) } + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) 
+} diff --git a/src/vendor/github.com/Sirupsen/logrus/appveyor.yml b/src/vendor/github.com/sirupsen/logrus/appveyor.yml similarity index 100% rename from src/vendor/github.com/Sirupsen/logrus/appveyor.yml rename to src/vendor/github.com/sirupsen/logrus/appveyor.yml diff --git a/src/vendor/github.com/Sirupsen/logrus/doc.go b/src/vendor/github.com/sirupsen/logrus/doc.go similarity index 100% rename from src/vendor/github.com/Sirupsen/logrus/doc.go rename to src/vendor/github.com/sirupsen/logrus/doc.go diff --git a/src/vendor/github.com/sirupsen/logrus/entry.go b/src/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 000000000..63e25583c --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,407 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. 
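
The entry helpers defined in this file (WithField, WithFields, WithError, WithTime) each return a copy rather than mutating the receiver, so a base entry can be shared safely. A brief usage sketch against this vendored API; the field names and messages are made up:

```go
package main

import (
	"errors"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// WithFields copies the data map, so `base` can be reused safely.
	base := log.WithFields(log.Fields{"module": "registry", "attempt": 1})
	base.Info("starting request")

	// WithError stores the error under ErrorKey ("error");
	// WithTime overrides the timestamp of the resulting entry.
	base.WithError(errors.New("connection reset")).
		WithTime(time.Now().Add(-time.Second)).
		Warn("retrying")
}
```
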
+func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch t.Kind() { + case reflect.Func: + isErrField = true + case reflect.Ptr: + isErrField = t.Elem().Kind() == reflect.Func + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, 2) + _ = runtime.Callers(0, pcs) + logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name()) + + // now that we have the cache, we can skip a minimum count of known-logrus functions + // XXX this is dubious, the number of frames may vary + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. + if entry.Time.IsZero() { + entry.Time = time.Now() + } + + entry.Level = level + entry.Message = msg + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } + + entry.fireHooks() + + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + + entry.write() + + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + } else { + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + } +} + +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) 
+} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/src/vendor/github.com/Sirupsen/logrus/exported.go b/src/vendor/github.com/sirupsen/logrus/exported.go similarity index 73% rename from src/vendor/github.com/Sirupsen/logrus/exported.go rename to src/vendor/github.com/sirupsen/logrus/exported.go index 013183eda..62fc2f219 100644 --- a/src/vendor/github.com/Sirupsen/logrus/exported.go +++ b/src/vendor/github.com/sirupsen/logrus/exported.go @@ -1,7 +1,9 @@ package logrus import ( + "context" "io" + "time" ) var ( @@ -15,37 +17,38 @@ func StandardLogger() *Logger { // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out + std.SetOutput(out) } // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) } // SetLevel sets the standard logger level. func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.level() + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) + std.AddHook(hook) } // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. @@ -53,6 +56,11 @@ func WithError(err error) *Entry { return std.WithField(ErrorKey, err) } +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // @@ -72,6 +80,20 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } +// WithTime creats an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) 
+} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -107,11 +129,16 @@ func Panic(args ...interface{}) { std.Panic(args...) } -// Fatal logs a message at level Fatal on the standard logger. +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatal(args ...interface{}) { std.Fatal(args...) } +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) @@ -147,11 +174,16 @@ func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } -// Fatalf logs a message at level Fatal on the standard logger. +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) @@ -187,7 +219,7 @@ func Panicln(args ...interface{}) { std.Panicln(args...) } -// Fatalln logs a message at level Fatal on the standard logger. +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalln(args ...interface{}) { std.Fatalln(args...) } diff --git a/src/vendor/github.com/sirupsen/logrus/formatter.go b/src/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 000000000..408883773 --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. 
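
A runnable version of the clash scenario described in the comment above, assuming the JSONFormatter added later in this patch; the trailing comment abbreviates the output and the time value will differ:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetOutput(os.Stdout)
	logger.SetFormatter(&log.JSONFormatter{})

	// The user-supplied "level" field clashes with the default key, so the
	// formatter re-emits it as "fields.level" instead of dropping it.
	logger.WithField("level", 1).Info("hello")
	// {"fields.level":1,"level":"info","msg":"hello","time":"..."}
}
```
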
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/src/vendor/github.com/sirupsen/logrus/go.mod b/src/vendor/github.com/sirupsen/logrus/go.mod new file mode 100644 index 000000000..8261a2b3a --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/go.mod @@ -0,0 +1,10 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 +) diff --git a/src/vendor/github.com/sirupsen/logrus/go.sum b/src/vendor/github.com/sirupsen/logrus/go.sum new file mode 100644 index 000000000..2d787be60 --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/src/vendor/github.com/Sirupsen/logrus/hooks.go b/src/vendor/github.com/sirupsen/logrus/hooks.go similarity index 100% rename from src/vendor/github.com/Sirupsen/logrus/hooks.go rename to src/vendor/github.com/sirupsen/logrus/hooks.go diff --git a/src/vendor/github.com/sirupsen/logrus/json_formatter.go b/src/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..098a21a06 --- /dev/null +++ 
b/src/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,121 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) + } + + return b.Bytes(), nil +} diff --git a/src/vendor/github.com/Sirupsen/logrus/logger.go b/src/vendor/github.com/sirupsen/logrus/logger.go similarity index 59% rename from src/vendor/github.com/Sirupsen/logrus/logger.go rename to src/vendor/github.com/sirupsen/logrus/logger.go index fdaf8a653..c0c0b1e55 100644 --- 
a/src/vendor/github.com/Sirupsen/logrus/logger.go +++ b/src/vendor/github.com/sirupsen/logrus/logger.go @@ -1,16 +1,18 @@ package logrus import ( + "context" "io" "os" "sync" "sync/atomic" + "time" ) type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. + // something more adventurous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking @@ -23,6 +25,10 @@ type Logger struct { // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. @@ -31,8 +37,12 @@ type Logger struct { mu MutexWrap // Reusable empty entry entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc } +type exitFunc func(int) + type MutexWrap struct { lock sync.Mutex disabled bool @@ -68,10 +78,12 @@ func (mw *MutexWrap) Disable() { // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, } } @@ -84,11 +96,12 @@ func (logger *Logger) newEntry() *Entry { } func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} logger.entryPool.Put(entry) } // Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() @@ -112,20 +125,38 @@ func (logger *Logger) WithError(err error) *Entry { return entry.WithError(err) } -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.level() >= DebugLevel { +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Debugf(format, args...) + entry.Logf(level, format, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infof(format, args...) 
- logger.releaseEntry(entry) - } + logger.Logf(InfoLevel, format, args...) } func (logger *Logger) Printf(format string, args ...interface{}) { @@ -135,123 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(WarnLevel, format, args...) } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(ErrorLevel, format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalf(format, args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panicf(format, args...) + entry.Log(level, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + func (logger *Logger) Debug(args ...interface{}) { - if logger.level() >= DebugLevel { - entry := logger.newEntry() - entry.Debug(args...) - logger.releaseEntry(entry) - } + logger.Log(DebugLevel, args...) } func (logger *Logger) Info(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Info(args...) - logger.releaseEntry(entry) - } + logger.Log(InfoLevel, args...) } func (logger *Logger) Print(args ...interface{}) { entry := logger.newEntry() - entry.Info(args...) + entry.Print(args...) logger.releaseEntry(entry) } func (logger *Logger) Warn(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Log(WarnLevel, args...) } func (logger *Logger) Warning(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Warn(args...) } func (logger *Logger) Error(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Error(args...) - logger.releaseEntry(entry) - } + logger.Log(ErrorLevel, args...) } func (logger *Logger) Fatal(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatal(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Log(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panic(args...) + entry.Logln(level, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) 
+} + func (logger *Logger) Debugln(args ...interface{}) { - if logger.level() >= DebugLevel { - entry := logger.newEntry() - entry.Debugln(args...) - logger.releaseEntry(entry) - } + logger.Logln(DebugLevel, args...) } func (logger *Logger) Infoln(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infoln(args...) - logger.releaseEntry(entry) - } + logger.Logln(InfoLevel, args...) } func (logger *Logger) Println(args ...interface{}) { @@ -261,44 +260,32 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Logln(WarnLevel, args...) } func (logger *Logger) Warningln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Warnln(args...) } func (logger *Logger) Errorln(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorln(args...) - logger.releaseEntry(entry) - } + logger.Logln(ErrorLevel, args...) } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalln(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logln(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { - if logger.level() >= PanicLevel { - entry := logger.newEntry() - entry.Panicln(args...) - logger.releaseEntry(entry) + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit } + logger.ExitFunc(code) } //When file is opened with appending mode, it's safe to @@ -312,12 +299,53 @@ func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } +// SetLevel sets the logger level. func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. func (logger *Logger) AddHook(hook Hook) { logger.mu.Lock() defer logger.mu.Unlock() logger.Hooks.Add(hook) } + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. 
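
The setters introduced in this hunk (SetLevel, SetFormatter, SetOutput, SetReportCaller) together with the new ExitFunc field make the logger straightforward to exercise in tests. A sketch under that assumption; the buffer and exit-code capture are illustrative, not part of the patch:

```go
package main

import (
	"bytes"
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Route output to a buffer instead of os.Stderr and raise the verbosity.
	buf := &bytes.Buffer{}
	logger.SetOutput(buf)
	logger.SetLevel(log.DebugLevel)

	// ExitFunc lets a test observe Fatal without terminating the process.
	exitCode := -1
	logger.ExitFunc = func(code int) { exitCode = code }
	logger.Fatal("simulated fatal")

	fmt.Printf("exit code %d, %d bytes logged\n", exitCode, buf.Len())
}
```
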
+func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/src/vendor/github.com/Sirupsen/logrus/logrus.go b/src/vendor/github.com/sirupsen/logrus/logrus.go similarity index 72% rename from src/vendor/github.com/Sirupsen/logrus/logrus.go rename to src/vendor/github.com/sirupsen/logrus/logrus.go index dd3899974..8644761f7 100644 --- a/src/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/src/vendor/github.com/sirupsen/logrus/logrus.go @@ -14,22 +14,11 @@ type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" } - - return "unknown" } // ParseLevel takes a string level and returns the Logrus log level constant. @@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) { return InfoLevel, nil case "debug": return DebugLevel, nil + case "trace": + return TraceLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = Level(l) + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, @@ -61,6 +85,7 @@ var AllLevels = []Level{ WarnLevel, InfoLevel, DebugLevel, + TraceLevel, } // These are the different logging levels. You can set the logging level to log @@ -69,7 +94,7 @@ const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. @@ -82,6 +107,8 @@ const ( InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. 
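
Level now round-trips through MarshalText/UnmarshalText, which is also what lets ParseLevel accept "trace". A short sketch assuming the signatures shown in this logrus.go hunk:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	// ParseLevel now accepts "trace" in addition to the existing names.
	lvl, err := log.ParseLevel("trace")
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl) // "trace", printed via Level.MarshalText

	// Level implements encoding.TextUnmarshaler, so it can be decoded
	// straight out of a configuration value.
	var fromConfig log.Level
	if err := fromConfig.UnmarshalText([]byte("warning")); err != nil {
		panic(err)
	}
	fmt.Println(fromConfig == log.WarnLevel) // true
}
```
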
+ TraceLevel ) // Won't compile if StdLogger can't be realized by a log.Logger @@ -140,4 +167,20 @@ type FieldLogger interface { Errorln(args ...interface{}) Fatalln(args ...interface{}) Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) } diff --git a/src/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/src/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go similarity index 75% rename from src/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go rename to src/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go index 3de08e802..2403de981 100644 --- a/src/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go +++ b/src/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -1,4 +1,4 @@ -// +build appengine gopherjs +// +build appengine package logrus diff --git a/src/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/src/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 000000000..3c4f43f91 --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + diff --git a/src/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/src/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000..0c209750a --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,11 @@ +// +build js + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/src/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/src/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go similarity index 58% rename from src/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go rename to src/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go index 067047a12..7be2d87c5 100644 --- a/src/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go +++ b/src/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -1,18 +1,16 @@ -// +build !appengine,!gopherjs +// +build !appengine,!js,!windows package logrus import ( "io" "os" - - "golang.org/x/crypto/ssh/terminal" ) func checkIfTerminal(w io.Writer) bool { switch v := w.(type) { case *os.File: - return terminal.IsTerminal(int(v.Fd())) + return isTerminal(int(v.Fd())) default: return false } diff --git a/src/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/src/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 000000000..355dc966f --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == 
nil +} + diff --git a/src/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/src/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 000000000..3b9d2864c --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,20 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + return err == nil + default: + return false + } +} diff --git a/src/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/src/vendor/github.com/sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..3dbd23720 --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,8 @@ +// +build !windows + +package logrus + +import "io" + +func initTerminal(w io.Writer) { +} diff --git a/src/vendor/github.com/sirupsen/logrus/terminal_windows.go b/src/vendor/github.com/sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..b4ef5286c --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/terminal_windows.go @@ -0,0 +1,18 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} diff --git a/src/vendor/github.com/sirupsen/logrus/text_formatter.go b/src/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..1569161eb --- /dev/null +++ b/src/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,299 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "time" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + + if f.isTerminal { + initTerminal(entry.Logger.Out) + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + isColored = true + } else if ok && force == "0" { + isColored = false + } else if os.Getenv("CLICOLOR") == "0" { + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/src/vendor/github.com/Sirupsen/logrus/writer.go b/src/vendor/github.com/sirupsen/logrus/writer.go similarity index 96% rename from src/vendor/github.com/Sirupsen/logrus/writer.go rename to src/vendor/github.com/sirupsen/logrus/writer.go index 7bdebedc6..9e1f75135 100644 --- a/src/vendor/github.com/Sirupsen/logrus/writer.go +++ b/src/vendor/github.com/sirupsen/logrus/writer.go @@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { var printFunc func(args ...interface{}) switch level { + case TraceLevel: + printFunc = entry.Trace case DebugLevel: printFunc = entry.Debug case InfoLevel: diff --git a/src/vendor/github.com/docker/notary/.gitignore b/src/vendor/github.com/theupdateframework/notary/.gitignore similarity index 100% rename from src/vendor/github.com/docker/notary/.gitignore rename to src/vendor/github.com/theupdateframework/notary/.gitignore diff --git a/src/vendor/github.com/docker/notary/CHANGELOG.md b/src/vendor/github.com/theupdateframework/notary/CHANGELOG.md similarity index 51% rename from src/vendor/github.com/docker/notary/CHANGELOG.md rename to src/vendor/github.com/theupdateframework/notary/CHANGELOG.md index d1708ff69..140221dc0 100644 --- a/src/vendor/github.com/docker/notary/CHANGELOG.md +++ b/src/vendor/github.com/theupdateframework/notary/CHANGELOG.md @@ -1,5 +1,55 @@ # Changelog +## [v0.6.1](https://github.com/docker/notary/releases/tag/v0.6.0) 04/10/2018 ++ Fixed bug where CLI requested admin privileges for all metadata operations, including listing targets on a repo [#1315](https://github.com/theupdateframework/notary/pull/1315) ++ Prevented notary signer from being dumpable or ptraceable in Linux, except in debug mode [#1327](https://github.com/theupdateframework/notary/pull/1327) ++ Bumped JWT dependency to fix potential Invalid Curve Attack on NIST curves within ECDH key management [#1334](https://github.com/theupdateframework/notary/pull/1334) ++ If the home directory cannot be found, log a warning instead of erroring out [#1318](https://github.com/theupdateframework/notary/pull/1318) ++ Bumped go version and various dependencies [#1323](https://github.com/theupdateframework/notary/pull/1323) [#1332](https://github.com/theupdateframework/notary/pull/1332) [#1335](https://github.com/theupdateframework/notary/pull/1335) [#1336](https://github.com/theupdateframework/notary/pull/1336) ++ Various internal and documentation fixes [#1312](https://github.com/theupdateframework/notary/pull/1312) [#1313](https://github.com/theupdateframework/notary/pull/1313) [#1319](https://github.com/theupdateframework/notary/pull/1319) [#1320](https://github.com/theupdateframework/notary/pull/1320) [#1324](https://github.com/theupdateframework/notary/pull/1324) [#1326](https://github.com/theupdateframework/notary/pull/1326) [#1328](https://github.com/theupdateframework/notary/pull/1328) [#1329](https://github.com/theupdateframework/notary/pull/1329) 
[#1333](https://github.com/theupdateframework/notary/pull/1333) + +## [v0.6.0](https://github.com/docker/notary/releases/tag/v0.6.0) 02/28/2018 ++ **The project has been moved from https://github.com/docker/notary to https://github.com/theupdateframework/notary, as it has been accepted into the CNCF. Downstream users should update their go imports.** ++ Removed support for RSA-key exchange ciphers supported by the server and signer and require TLS >= 1.2 for the server and signer. [#1307](https://github.com/theupdateframework/notary/pull/1307) ++ `libykcs11` can be found in several additional locations on Fedora. [#1286](https://github.com/theupdateframework/notary/pull/1286/) ++ If a certificate is used as a delegation public key, notary no longer warns if the certificate has expired, since notary should be relying on the role expiry instead. [#1263](https://github.com/theupdateframework/notary/pull/1263) ++ An error is now returned when importing keys if there were invalid PEM blocks. [#1260](https://github.com/theupdateframework/notary/pull/1260) ++ Notary server authentication credentials can now be provided as an environment variable `NOTARY_AUTH`, which should contain a base64-encoded "username:password" value. [#1246](https://github.com/theupdateframework/notary/pull/1246) ++ Changefeeds are now supported for RethinkDB as well as SQL servers. [#1214](https://github.com/theupdateframework/notary/pull/1214) ++ Notary CLI will now time out after 30 seconds if a username and password are not provided when authenticating to anotary server, fixing an issue where scripts for the notary CLI may hang forever. [#1200](https://github.com/theupdateframework/notary/pull/1200) ++ Fixed potential race condition in the signer keystore. [#1198](https://github.com/theupdateframework/notary/pull/1198) ++ Notary now no longer provides the option to generate RSA keys for a repository, but externally generated RSA keys can still be imported as keys for a repository. [#1191](https://github.com/theupdateframework/notary/pull/1191) ++ Fixed bug where the notary client would `ioutil.ReadAll` responses from the server without limiting the size. [#1186](https://github.com/theupdateframework/notary/pull/1186) ++ Default notary CLI log level is now `warn`, and if the `-v` option is passed, it is at `info`. [#1179](https://github.com/theupdateframework/notary/pull/1179) ++ Example Postgres config now includes an example of mutual TLS authentication between the server/signer and Postgres. [#1160](https://github.com/theupdateframework/notary/pull/1160) [#1163](https://github.com/theupdateframework/notary/pull/1163/) ++ Fixed an error where piping the server authentication credentials via STDIN when scripting the notary CLI did not work. [#1155](https://github.com/theupdateframework/notary/pull/1155) ++ If the server and signer configurations forget to specify `parseTime=true` when using MySQL, notary server and signer will automatically add the option. [#1150](https://github.com/theupdateframework/notary/pull/1150) ++ Custom metadata can now be provided and read on a target when using the notary client as a library (not yet exposed on the CLI). [#1146](https://github.com/theupdateframework/notary/pull/1146) ++ `notary init` now accepts a `--root-cert` and `--root-key` flag for use with privately generated certificates and keys. [#1144](https://github.com/theupdateframework/notary/pull/1144) ++ `notary key generate` now accepts a `--role` flag as well as a `--output` flag. 
This means it can generate new targets or delegation keys, and it can also output keys to a file instead of storing it in the default notary key store. [#1134](https://github.com/theupdateframework/notary/pull/1134) ++ Newly generated keys are now stored encrypted and encoded in PKCS#8 format. **This is not forwards-compatible against notary<0.6.0 and docker<17.12.x. Also please note that docker>=17.12.x is not forwards compatible with notary<0.6.0.**. [#1130](https://github.com/theupdateframework/notary/pull/1130) [#1201](https://github.com/theupdateframework/notary/pull/1201) ++ Added support for wildcarded certificate IDs in the trustpinning configuration [#1126](https://github.com/theupdateframework/notary/pull/1126) ++ Added support using the client against notary servers which are hosted as subpath under another server (e.g. https://domain.com/notary instead of https://notary.com) [#1108](https://github.com/theupdateframework/notary/pull/1108) ++ If no changes were made to the targets file, you are no longer required to sign the target [#1104](https://github.com/theupdateframework/notary/pull/1104) ++ escrow placeholder [#1096](https://github.com/theupdateframework/notary/pull/1096) ++ Added support for wildcard suffixes for root certificates CNs for root keys, so that a single root certificate would be valid for multiple repositories [#1088](https://github.com/theupdateframework/notary/pull/1088) ++ Root key rotations now do not require all previous root keys sign new root metadata. [#942](https://github.com/theupdateframework/notary/pull/942). + + New keys are trusted if the root metadata file specifying the new key was signed by the previous root key/threshold + + Root metadata can now be requested by version from the server, allowing clients with older root metadata to validate each new version one by one up to the current metadata ++ `notary key rotate` now accepts a flag specifying which key to rotate to [#942](https://github.com/theupdateframework/notary/pull/942) ++ Refactoring of the client to make it easier to use as a library and to inject dependencies: + + References to GUN have now been changed to "imagename". [#1081](https://github.com/theupdateframework/notary/pull/1081) + + `NewNotaryRepository` can now be provided with a remote store and changelist, as opposed to always constructing its own. [#1094](https://github.com/theupdateframework/notary/pull/1094) + + If needed, the notary repository will be initialized first when publishing. [#1105](https://github.com/theupdateframework/notary/pull/1105) + + `NewNotaryReository` now requires a non-nil cache store. [#1185](https://github.com/theupdateframework/notary/pull/1185) + + The "No valid trust data" error is now typed. [#1212](https://github.com/theupdateframework/notary/pull/1212) + + `TUFClient` was previously mistakenly exported, and is now unexported. [#1215](https://github.com/theupdateframework/notary/pull/1215) + + The notary client now has a `Repository` interface type to standardize `client.NotaryRepository`. [#1220](https://github.com/theupdateframework/notary/pull/1220) + + The constructor functions `NewFileCachedNotaryRepository` and `NewNotaryRepository` have been renamed, respectively, to `NewFileCachedRepository` and `NewRepository` to reduce redundancy. [#1226](https://github.com/theupdateframework/notary/pull/1226) + + `NewRepository` returns an interface as opposed to the concrete type `NotaryRepository` it previously did. `NotaryRepository` is also now an unexported concrete type. 
[#1226](https://github.com/theupdateframework/notary/pull/1226) + + Key import/export logic has been moved from the `utils` package to the `trustmanager` package. [#1250](https://github.com/theupdateframework/notary/pull/1250) + + ## [v0.5.0](https://github.com/docker/notary/releases/tag/v0.5.0) 11/14/2016 + Non-certificate public keys in PEM format can now be added to delegation roles [#965](https://github.com/docker/notary/pull/965) + PostgreSQL support as a storage backend for Server and Signer [#920](https://github.com/docker/notary/pull/920) diff --git a/src/vendor/github.com/docker/notary/CONTRIBUTING.md b/src/vendor/github.com/theupdateframework/notary/CONTRIBUTING.md similarity index 98% rename from src/vendor/github.com/docker/notary/CONTRIBUTING.md rename to src/vendor/github.com/theupdateframework/notary/CONTRIBUTING.md index c650e3005..64d2326f9 100644 --- a/src/vendor/github.com/docker/notary/CONTRIBUTING.md +++ b/src/vendor/github.com/theupdateframework/notary/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing to notary -## Before reporting an issue... +## Before reporting an issue... ### If your problem is with... @@ -26,7 +26,7 @@ By following these simple rules you will get better and faster feedback on your - search the bugtracker for an already reported issue -### If you found an issue that describes your problem: +### If you found an issue that describes your problem: - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - please refrain from adding "same thing here" or "+1" comments diff --git a/src/vendor/github.com/docker/notary/CONTRIBUTORS b/src/vendor/github.com/theupdateframework/notary/CONTRIBUTORS similarity index 100% rename from src/vendor/github.com/docker/notary/CONTRIBUTORS rename to src/vendor/github.com/theupdateframework/notary/CONTRIBUTORS diff --git a/src/vendor/github.com/docker/notary/Dockerfile b/src/vendor/github.com/theupdateframework/notary/Dockerfile similarity index 74% rename from src/vendor/github.com/docker/notary/Dockerfile rename to src/vendor/github.com/theupdateframework/notary/Dockerfile index 4576d330a..d42f90094 100644 --- a/src/vendor/github.com/docker/notary/Dockerfile +++ b/src/vendor/github.com/theupdateframework/notary/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.7.3 +FROM golang:1.10.1 RUN apt-get update && apt-get install -y \ curl \ @@ -10,6 +10,7 @@ RUN apt-get update && apt-get install -y \ xz-utils \ python \ python-pip \ + python-setuptools \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* @@ -17,9 +18,9 @@ RUN useradd -ms /bin/bash notary \ && pip install codecov \ && go get github.com/golang/lint/golint github.com/fzipp/gocyclo github.com/client9/misspell/cmd/misspell github.com/gordonklaus/ineffassign github.com/HewlettPackard/gas -ENV NOTARYDIR /go/src/github.com/docker/notary +ENV NOTARYDIR /go/src/github.com/theupdateframework/notary COPY . 
${NOTARYDIR} -RUN chmod -R a+rw /go +RUN chmod -R a+rw /go && chmod 0600 ${NOTARYDIR}/fixtures/database/* WORKDIR ${NOTARYDIR} diff --git a/src/vendor/github.com/docker/notary/Jenkinsfile b/src/vendor/github.com/theupdateframework/notary/Jenkinsfile similarity index 55% rename from src/vendor/github.com/docker/notary/Jenkinsfile rename to src/vendor/github.com/theupdateframework/notary/Jenkinsfile index fa29520b5..ef323b0e6 100644 --- a/src/vendor/github.com/docker/notary/Jenkinsfile +++ b/src/vendor/github.com/theupdateframework/notary/Jenkinsfile @@ -1,8 +1,7 @@ // Only run on Linux atm -wrappedNode(label: 'docker') { +wrappedNode(label: 'ubuntu && ec2 && docker-edge') { deleteDir() stage "checkout" checkout scm - documentationChecker("docs") } diff --git a/src/vendor/github.com/docker/notary/LICENSE b/src/vendor/github.com/theupdateframework/notary/LICENSE similarity index 99% rename from src/vendor/github.com/docker/notary/LICENSE rename to src/vendor/github.com/theupdateframework/notary/LICENSE index 6daf85e9d..ad9500955 100644 --- a/src/vendor/github.com/docker/notary/LICENSE +++ b/src/vendor/github.com/theupdateframework/notary/LICENSE @@ -1,4 +1,4 @@ -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/src/vendor/github.com/docker/notary/MAINTAINERS b/src/vendor/github.com/theupdateframework/notary/MAINTAINERS similarity index 87% rename from src/vendor/github.com/docker/notary/MAINTAINERS rename to src/vendor/github.com/theupdateframework/notary/MAINTAINERS index 74cd2c2d4..12c1582fc 100644 --- a/src/vendor/github.com/docker/notary/MAINTAINERS +++ b/src/vendor/github.com/theupdateframework/notary/MAINTAINERS @@ -1,6 +1,6 @@ # Notary maintainers file # -# This file describes who runs the docker/notary project and how. +# This file describes who runs the theupdateframework/notary project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. @@ -13,7 +13,6 @@ people = [ "cyli", "diogomonica", - "dmcgowan", "endophage", "ecordell", "hukeping", @@ -39,11 +38,6 @@ Email = "diogo@docker.com" GitHub = "diogomonica" - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@docker.com" - GitHub = "dmcgowan" - [people.endophage] Name = "David Lawrence" Email = "david.lawrence@docker.com" @@ -66,5 +60,5 @@ [people.riyazdf] Name = "Riyaz Faizullabhoy" - Email = "riyaz@docker.com" + Email = "riyazdf@berkeley.edu" GitHub = "riyazdf" diff --git a/src/vendor/github.com/theupdateframework/notary/MAINTAINERS.ALUMNI b/src/vendor/github.com/theupdateframework/notary/MAINTAINERS.ALUMNI new file mode 100644 index 000000000..986fa880b --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/MAINTAINERS.ALUMNI @@ -0,0 +1,22 @@ +# Notary maintainers alumni file +# +# This file describes past maintainers who have stepped down from the role. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. 
+# +[Org] + [Org."Notary Alumni"] + people = [ + "dmcgowan", + ] + +[people] + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@docker.com" + GitHub = "dmcgowan" diff --git a/src/vendor/github.com/theupdateframework/notary/MAINTAINERS_RULES.md b/src/vendor/github.com/theupdateframework/notary/MAINTAINERS_RULES.md new file mode 100644 index 000000000..be76d9105 --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/MAINTAINERS_RULES.md @@ -0,0 +1,39 @@ +# Maintainers Rules + +This document lays out some basic rules and guidelines all maintainers are expected to follow. +Changes to the [Acceptance Criteria](#hard-acceptance-criteria-for-merging-a-pr) for merging PRs require a ceiling(two-thirds) supermajority from the maintainers. +Changes to the [Repo Guidelines](#repo-guidelines) require a simple majority. + +## Hard Acceptance Criteria for merging a PR: + +- 2 LGTMs are required when merging a PR +- If there is obviously still discussion going on in the PR, even with 2 LGTMs, let the discussion resolve before merging. If you’re not sure, reach out to the maintainers involved in the discussion. +- All checks must be green + - There are limited mitigating circumstances for this, like if the docs builds are just broken and that’s the only test failing. + - Adding or removing a check requires simple majority approval from the maintainers. + +## Repo Guidelines: + +- Consistency is vital to keep complexity low and understandable. +- Automate as much as possible (we don’t have guidelines about coding style for example because we’ve automated fmt, vet, lint, etc…). +- Try to keep PRs small and focussed (this is not always possible, i.e. builder refactor, storage refactor, etc… but a good target). + +## Process for becoming a maintainer: + +- Invitation is proposed by an existing maintainer. +- Ceiling(two-thirds) supermajority approval from existing maintainers (including vote of proposing maintainer) required to accept proposal. +- Newly approved maintainer submits PR adding themselves to the MAINTAINERS file. +- Existing maintainers publicly mark their approval on the PR. +- Existing maintainer updates repository permissions to grant write access to new maintainer. +- New maintainer merges their PR. + +## Removing maintainers + +It is preferrable that a maintainer gracefully removes themselves from the MAINTAINERS file if they are +aware they will no longer have the time or motivation to contribute to the project. Maintainers that +have been inactive in the repo for a period of at least one year should be contacted to ask if they +wish to be removed. + +In the case that an inactive maintainer is unresponsive for any reason, a ceiling(two-thirds) supermajority +vote of the existing maintainers can be used to approve their removal from the MAINTAINERS file, and revoke +their merge permissions on the repository. 
\ No newline at end of file diff --git a/src/vendor/github.com/docker/notary/Makefile b/src/vendor/github.com/theupdateframework/notary/Makefile similarity index 95% rename from src/vendor/github.com/docker/notary/Makefile rename to src/vendor/github.com/theupdateframework/notary/Makefile index ba28925a1..ab794165b 100644 --- a/src/vendor/github.com/docker/notary/Makefile +++ b/src/vendor/github.com/theupdateframework/notary/Makefile @@ -3,7 +3,7 @@ PREFIX?=$(shell pwd) # Populate version variables # Add to compile time flags -NOTARY_PKG := github.com/docker/notary +NOTARY_PKG := github.com/theupdateframework/notary NOTARY_VERSION := $(shell cat NOTARY_VERSION) GITCOMMIT := $(shell git rev-parse --short HEAD) GITUNTRACKEDCHANGES := $(shell git status --porcelain --untracked-files=no) @@ -15,9 +15,9 @@ GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)" GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static" GOOSES = darwin linux windows NOTARY_BUILDTAGS ?= pkcs11 -NOTARYDIR := /go/src/github.com/docker/notary +NOTARYDIR := /go/src/github.com/theupdateframework/notary -GO_VERSION := $(shell go version | grep "1\.[7-9]\(\.[0-9]+\)*\|devel") +GO_VERSION := $(shell go version | grep "1\.\(7\|8\|9\|10\)\(\.[0-9]+\)*\|devel") # check to make sure we have the right version. development versions of Go are # not officially supported, but allowed for building @@ -45,10 +45,7 @@ PKGS ?= $(shell go list -tags "${NOTARY_BUILDTAGS}" ./... | grep -v /vendor/ | t .DELETE_ON_ERROR: cover .DEFAULT: default -all: AUTHORS clean lint build test binaries - -AUTHORS: .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ +all: clean lint build test binaries # This only needs to be generated by hand when cutting full releases. version/version.go: @@ -164,7 +161,7 @@ ci: override TESTOPTS = -race # Codecov knows how to merge multiple coverage files, so covmerge is not needed ci: gen-cover -yubikey-tests: override PKGS = github.com/docker/notary/cmd/notary github.com/docker/notary/trustmanager/yubikey +yubikey-tests: override PKGS = github.com/theupdateframework/notary/cmd/notary github.com/theupdateframework/notary/trustmanager/yubikey yubikey-tests: ci covmerge: diff --git a/src/vendor/github.com/theupdateframework/notary/NOTARY_VERSION b/src/vendor/github.com/theupdateframework/notary/NOTARY_VERSION new file mode 100644 index 000000000..ee6cdce3c --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/NOTARY_VERSION @@ -0,0 +1 @@ +0.6.1 diff --git a/src/vendor/github.com/docker/notary/README.md b/src/vendor/github.com/theupdateframework/notary/README.md similarity index 55% rename from src/vendor/github.com/docker/notary/README.md rename to src/vendor/github.com/theupdateframework/notary/README.md index 01af5d9b3..fb44a4ae8 100644 --- a/src/vendor/github.com/docker/notary/README.md +++ b/src/vendor/github.com/theupdateframework/notary/README.md @@ -1,11 +1,24 @@ -# Notary -[![Circle CI](https://circleci.com/gh/docker/notary/tree/master.svg?style=shield)](https://circleci.com/gh/docker/notary/tree/master) [![CodeCov](https://codecov.io/github/docker/notary/coverage.svg?branch=master)](https://codecov.io/github/docker/notary) [![GoReportCard](https://goreportcard.com/badge/docker/notary)](https://goreportcard.com/report/github.com/docker/notary) +Notary + +[![GoDoc](https://godoc.org/github.com/theupdateframework/notary?status.svg)](https://godoc.org/github.com/theupdateframework/notary) +[![Circle 
CI](https://circleci.com/gh/theupdateframework/notary/tree/master.svg?style=shield)](https://circleci.com/gh/theupdateframework/notary/tree/master) [![CodeCov](https://codecov.io/github/theupdateframework/notary/coverage.svg?branch=master)](https://codecov.io/github/theupdateframework/notary) [![GoReportCard](https://goreportcard.com/badge/theupdateframework/notary)](https://goreportcard.com/report/github.com/theupdateframework/notary) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_shield) + +# Notice + +The Notary project has officially been accepted in to the Cloud Native Computing Foundation (CNCF). +It has moved to https://github.com/theupdateframework/notary. Any downstream consumers should update +their Go imports to use this new location, which will be the canonical location going forward. + +We have moved the repo in GitHub, which will allow existing importers to continue using the old +location via GitHub's redirect. + +# Overview The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting -with trusted collections. Please see the [service architecture](docs/service_architecture.md) documentation +with trusted collections. See the [service architecture](docs/service_architecture.md) documentation for more information. - Notary aims to make the internet more secure by making it easy for people to publish and verify content. We often rely on TLS to secure our communications with a web server which is inherently flawed, as any compromise of the server @@ -16,16 +29,16 @@ secure. Once the publisher is ready to make the content available, they can push their signed trusted collection to a Notary Server. Consumers, having acquired the publisher's public key through a secure channel, -can then communicate with any notary server or (insecure) mirror, relying +can then communicate with any Notary server or (insecure) mirror, relying only on the publisher's key to determine the validity and integrity of the received content. ## Goals -Notary is based on [The Update Framework](https://www.theupdateframework.com/), a secure general design for the problem of software distribution and updates. By using TUF, notary achieves a number of key advantages: +Notary is based on [The Update Framework](https://www.theupdateframework.com/), a secure general design for the problem of software distribution and updates. By using TUF, Notary achieves a number of key advantages: * **Survivable Key Compromise**: Content publishers must manage keys in order to sign their content. Signing keys may be compromised or lost so systems must be designed in order to be flexible and recoverable in the case of key compromise. TUF's notion of key roles is utilized to separate responsibilities across a hierarchy of keys such that loss of any particular key (except the root role) by itself is not fatal to the security of the system. -* **Freshness Guarantees**: Replay attacks are a common problem in designing secure systems, where previously valid payloads are replayed to trick another system. The same problem exists in the software update systems, where old signed can be presented as the most recent. notary makes use of timestamping on publishing so that consumers can know that they are receiving the most up to date content. 
This is particularly important when dealing with software update where old vulnerable versions could be used to attack users. +* **Freshness Guarantees**: Replay attacks are a common problem in designing secure systems, where previously valid payloads are replayed to trick another system. The same problem exists in the software update systems, where old signed can be presented as the most recent. Notary makes use of timestamping on publishing so that consumers can know that they are receiving the most up to date content. This is particularly important when dealing with software update where old vulnerable versions could be used to attack users. * **Configurable Trust Thresholds**: Oftentimes there are a large number of publishers that are allowed to publish a particular piece of content. For example, open source projects where there are a number of core maintainers. Trust thresholds can be used so that content consumers require a configurable number of signatures on a piece of content in order to trust it. Using thresholds increases security so that loss of individual signing keys doesn't allow publishing of malicious content. * **Signing Delegation**: To allow for flexible publishing of trusted collections, a content publisher can delegate part of their collection to another signer. This delegation is represented as signed metadata so that a consumer of the content can verify both the content and the delegation. * **Use of Existing Distribution**: Notary's trust guarantees are not tied at all to particular distribution channels from which content is delivered. Therefore, trust can be added to any existing content delivery mechanism. @@ -33,29 +46,29 @@ Notary is based on [The Update Framework](https://www.theupdateframework.com/), ## Security -Please see our [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations. +See Notary's [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations. -Our last security audit was on July 31, 2015 by NCC ([results](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf)). +Notary's last security audit was on July 31, 2015 by NCC ([results](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf)). Any security vulnerabilities can be reported to security@docker.com. # Getting started with the Notary CLI -Please get the Notary Client CLI binary from [the official releases page](https://github.com/docker/notary/releases) or you can [build one yourself](#building-notary). -The version of Notary server and signer should be greater than or equal to Notary CLI's version to ensure feature compatibility (ex: CLI version 0.2, server/signer version >= 0.2), and all official releases are associated with GitHub tags. +Get the Notary Client CLI binary from [the official releases page](https://github.com/theupdateframework/notary/releases) or you can [build one yourself](#building-notary). +The version of the Notary server and signer should be greater than or equal to Notary CLI's version to ensure feature compatibility (ex: CLI version 0.2, server/signer version >= 0.2), and all official releases are associated with GitHub tags. 
-To use the Notary CLI with Docker hub images, please have a look at our +To use the Notary CLI with Docker hub images, have a look at Notary's [getting started docs](docs/getting_started.md). -For more advanced usage, please see the +For more advanced usage, see the [advanced usage docs](docs/advanced_usage.md). To use the CLI against a local Notary server rather than against Docker Hub: -1. Please ensure that you have [docker and docker-compose](http://docs.docker.com/compose/install/) installed. -1. `git clone https://github.com/docker/notary.git` and from the cloned repository path, +1. Ensure that you have [docker and docker-compose](http://docs.docker.com/compose/install/) installed. +1. `git clone https://github.com/theupdateframework/notary.git` and from the cloned repository path, start up a local Notary server and signer and copy the config file and testing certs to your - local notary config directory: + local Notary config directory: ```sh $ docker-compose build @@ -78,28 +91,31 @@ to use `notary` with Docker images. ## Building Notary -Note that our [latest stable release](https://github.com/docker/notary/releases) is at the head of the -[releases branch](https://github.com/docker/notary/tree/releases). The master branch is the development +Note that Notary's [latest stable release](https://github.com/theupdateframework/notary/releases) is at the head of the +[releases branch](https://github.com/theupdateframework/notary/tree/releases). The master branch is the development branch and contains features for the next release. Prerequisites: - Go >= 1.7.1 -- [godep](https://github.com/tools/godep) installed + - Fedora: `dnf install golang` - libtool development headers installed - Ubuntu: `apt-get install libltdl-dev` - CentOS/RedHat: `yum install libtool-ltdl-devel` + - Fedora: `dnf install libtool-ltdl-devel` - Mac OS ([Homebrew](http://brew.sh/)): `brew install libtool` -Run `make client`, which creates the Notary Client CLI binary at `bin/notary`. -Note that `make client` assumes a standard Go directory structure, in which -Notary is checked out to the `src` directory in your `GOPATH`. For example: -``` -$GOPATH/ - src/ - github.com/ - docker/ - notary/ +Set [```GOPATH```](https://golang.org/doc/code.html#GOPATH). Then, run: + +```bash +$ go get github.com/theupdateframework/notary +# build with pcks11 support by default to support yubikey +$ go install -tags pkcs11 github.com/theupdateframework/notary/cmd/notary +$ notary ``` -To build the server and signer, please run `docker-compose build`. \ No newline at end of file +To build the server and signer, run `docker-compose build`. 
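As a rough, non-authoritative illustration of what the move to `github.com/theupdateframework/notary` and the renamed constructors in this upgrade look like from a downstream Go consumer, the sketch below builds a repository handle with `NewFileCachedRepository`; the trust directory, GUN, server URL and the `passphrase.ConstantRetriever` helper are assumptions chosen for the example, not values taken from this patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/passphrase"
	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf/data"
)

func main() {
	// Construct a file-cached repository handle for a GUN (assumed values throughout).
	repo, err := client.NewFileCachedRepository(
		"/tmp/.notary",                       // base directory for local trust data (assumed)
		data.GUN("docker.io/library/alpine"), // globally unique name (assumed)
		"https://notary.docker.io",           // notary server URL (assumed)
		nil,                                  // http.RoundTripper; nil falls back to an offline store
		passphrase.ConstantRetriever("example-passphrase"), // assumed passphrase retriever helper
		trustpinning.TrustPinConfig{},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("repository handle for", repo.GetGUN())
}
```

Passing a nil `http.RoundTripper` is deliberate here: per the constructor's doc comment later in this patch, the client then uses a default offline metadata store.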
+ + +## License +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_large) diff --git a/src/vendor/github.com/docker/notary/ROADMAP.md b/src/vendor/github.com/theupdateframework/notary/ROADMAP.md similarity index 100% rename from src/vendor/github.com/docker/notary/ROADMAP.md rename to src/vendor/github.com/theupdateframework/notary/ROADMAP.md diff --git a/src/vendor/github.com/docker/notary/circle.yml b/src/vendor/github.com/theupdateframework/notary/circle.yml similarity index 100% rename from src/vendor/github.com/docker/notary/circle.yml rename to src/vendor/github.com/theupdateframework/notary/circle.yml diff --git a/src/vendor/github.com/docker/notary/client/changelist/change.go b/src/vendor/github.com/theupdateframework/notary/client/changelist/change.go similarity index 94% rename from src/vendor/github.com/docker/notary/client/changelist/change.go rename to src/vendor/github.com/theupdateframework/notary/client/changelist/change.go index f9fa552d0..8242ec003 100644 --- a/src/vendor/github.com/docker/notary/client/changelist/change.go +++ b/src/vendor/github.com/theupdateframework/notary/client/changelist/change.go @@ -1,7 +1,7 @@ package changelist import ( - "github.com/docker/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/data" ) // Scopes for TUFChanges are simply the TUF roles. @@ -82,8 +82,8 @@ func (c TUFChange) Content() []byte { // unexpected race conditions between humans modifying the same delegation type TUFDelegation struct { NewName data.RoleName `json:"new_name,omitempty"` - NewThreshold int `json:"threshold, omitempty"` - AddKeys data.KeyList `json:"add_keys, omitempty"` + NewThreshold int `json:"threshold,omitempty"` + AddKeys data.KeyList `json:"add_keys,omitempty"` RemoveKeys []string `json:"remove_keys,omitempty"` AddPaths []string `json:"add_paths,omitempty"` RemovePaths []string `json:"remove_paths,omitempty"` diff --git a/src/vendor/github.com/docker/notary/client/changelist/changelist.go b/src/vendor/github.com/theupdateframework/notary/client/changelist/changelist.go similarity index 100% rename from src/vendor/github.com/docker/notary/client/changelist/changelist.go rename to src/vendor/github.com/theupdateframework/notary/client/changelist/changelist.go diff --git a/src/vendor/github.com/docker/notary/client/changelist/file_changelist.go b/src/vendor/github.com/theupdateframework/notary/client/changelist/file_changelist.go similarity index 99% rename from src/vendor/github.com/docker/notary/client/changelist/file_changelist.go rename to src/vendor/github.com/theupdateframework/notary/client/changelist/file_changelist.go index 7e128a194..ab1b200e2 100644 --- a/src/vendor/github.com/docker/notary/client/changelist/file_changelist.go +++ b/src/vendor/github.com/theupdateframework/notary/client/changelist/file_changelist.go @@ -5,12 +5,12 @@ import ( "fmt" "io/ioutil" "os" + "path/filepath" "sort" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" - "path/filepath" + "github.com/sirupsen/logrus" ) // FileChangelist stores all the changes as files diff --git a/src/vendor/github.com/docker/notary/client/changelist/interface.go b/src/vendor/github.com/theupdateframework/notary/client/changelist/interface.go similarity index 97% rename from src/vendor/github.com/docker/notary/client/changelist/interface.go rename to 
src/vendor/github.com/theupdateframework/notary/client/changelist/interface.go index 70dc0a2d0..e8fb82477 100644 --- a/src/vendor/github.com/docker/notary/client/changelist/interface.go +++ b/src/vendor/github.com/theupdateframework/notary/client/changelist/interface.go @@ -1,6 +1,6 @@ package changelist -import "github.com/docker/notary/tuf/data" +import "github.com/theupdateframework/notary/tuf/data" // Changelist is the interface for all TUF change lists type Changelist interface { diff --git a/src/vendor/github.com/docker/notary/client/client.go b/src/vendor/github.com/theupdateframework/notary/client/client.go similarity index 76% rename from src/vendor/github.com/docker/notary/client/client.go rename to src/vendor/github.com/theupdateframework/notary/client/client.go index 2a27e32a0..efdc6ce4a 100644 --- a/src/vendor/github.com/docker/notary/client/client.go +++ b/src/vendor/github.com/theupdateframework/notary/client/client.go @@ -1,3 +1,4 @@ +//Package client implements everything required for interacting with a Notary repository. package client import ( @@ -12,16 +13,17 @@ import ( "regexp" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - "github.com/docker/notary/client/changelist" - "github.com/docker/notary/cryptoservice" - store "github.com/docker/notary/storage" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/utils" + canonicaljson "github.com/docker/go/canonical/json" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client/changelist" + "github.com/theupdateframework/notary/cryptoservice" + store "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/trustpinning" + "github.com/theupdateframework/notary/tuf" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" + "github.com/theupdateframework/notary/tuf/utils" ) const ( @@ -35,16 +37,15 @@ func init() { data.SetDefaultExpiryTimes(data.NotaryDefaultExpiries) } -// NotaryRepository stores all the information needed to operate on a notary -// repository. -type NotaryRepository struct { +// repository stores all the information needed to operate on a notary repository. +type repository struct { baseDir string gun data.GUN baseURL string changelist changelist.Changelist cache store.MetadataStore remoteStore store.RemoteStore - CryptoService signed.CryptoService + cryptoService signed.CryptoService tufRepo *tuf.Repo invalid *tuf.Repo // known data that was parsable but deemed invalid roundTrip http.RoundTripper @@ -52,13 +53,14 @@ type NotaryRepository struct { LegacyVersions int // number of versions back to fetch roots to sign with } -// NewFileCachedNotaryRepository is a wrapper for NewNotaryRepository that initializes +// NewFileCachedRepository is a wrapper for NewRepository that initializes // a file cache from the provided repository, local config information and a crypto service. // It also retrieves the remote store associated to the base directory under where all the // trust files will be stored and the specified GUN. -func NewFileCachedNotaryRepository(baseDir string, gun data.GUN, baseURL string, rt http.RoundTripper, - retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) ( - *NotaryRepository, error) { +// +// In case of a nil RoundTripper, a default offline store is used instead. 
+func NewFileCachedRepository(baseDir string, gun data.GUN, baseURL string, rt http.RoundTripper, + retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (Repository, error) { cache, err := store.NewFileStore( filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "metadata"), @@ -88,31 +90,35 @@ func NewFileCachedNotaryRepository(baseDir string, gun data.GUN, baseURL string, return nil, err } - return NewNotaryRepository(baseDir, gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl) + return NewRepository(baseDir, gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl) } -// NewNotaryRepository is the base method that returns a new notary repository. +// NewRepository is the base method that returns a new notary repository. // It takes the base directory under where all the trust files will be stored // (This is normally defaults to "~/.notary" or "~/.docker/trust" when enabling // docker content trust). -// It expects an initialized remote store and cache. -func NewNotaryRepository(baseDir string, gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore, - trustPinning trustpinning.TrustPinConfig, cryptoService signed.CryptoService, cl changelist.Changelist) ( - *NotaryRepository, error) { +// It expects an initialized cache. In case of a nil remote store, a default +// offline store is used. +func NewRepository(baseDir string, gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore, + trustPinning trustpinning.TrustPinConfig, cryptoService signed.CryptoService, cl changelist.Changelist) (Repository, error) { // Repo's remote store is either a valid remote store or an OfflineStore if remoteStore == nil { remoteStore = store.OfflineStore{} } - nRepo := &NotaryRepository{ + if cache == nil { + return nil, fmt.Errorf("got an invalid cache (nil metadata store)") + } + + nRepo := &repository{ gun: gun, baseURL: baseURL, baseDir: baseDir, changelist: cl, cache: cache, remoteStore: remoteStore, - CryptoService: cryptoService, + cryptoService: cryptoService, trustPinning: trustPinning, LegacyVersions: 0, // By default, don't sign with legacy roles } @@ -120,12 +126,18 @@ func NewNotaryRepository(baseDir string, gun data.GUN, baseURL string, remoteSto return nRepo, nil } +// GetGUN is a getter for the GUN object from a Repository +func (r *repository) GetGUN() data.GUN { + return r.gun +} + // Target represents a simplified version of the data TUF operates on, so external // applications don't have to depend on TUF data types. 
type Target struct { - Name string // the name of the target - Hashes data.Hashes // the hash of the target - Length int64 // the size in bytes of the target + Name string // the name of the target + Hashes data.Hashes // the hash of the target + Length int64 // the size in bytes of the target + Custom *canonicaljson.RawMessage // the custom data provided to describe the file at TARGETPATH } // TargetWithRole represents a Target that exists in a particular role - this is @@ -136,7 +148,7 @@ type TargetWithRole struct { } // NewTarget is a helper method that returns a Target -func NewTarget(targetName string, targetPath string) (*Target, error) { +func NewTarget(targetName, targetPath string, targetCustom *canonicaljson.RawMessage) (*Target, error) { b, err := ioutil.ReadFile(targetPath) if err != nil { return nil, err @@ -147,9 +159,10 @@ func NewTarget(targetName string, targetPath string) (*Target, error) { return nil, err } - return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length}, nil + return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length, Custom: targetCustom}, nil } +// rootCertKey generates the corresponding certificate for the private key given the privKey and repo's GUN func rootCertKey(gun data.GUN, privKey data.PrivateKey) (data.PublicKey, error) { // Hard-coded policy: the generated certificate expires in 10 years. startTime := time.Now() @@ -161,27 +174,19 @@ func rootCertKey(gun data.GUN, privKey data.PrivateKey) (data.PublicKey, error) x509PublicKey := utils.CertToKey(cert) if x509PublicKey == nil { - return nil, fmt.Errorf( - "cannot use regenerated certificate: format %s", cert.PublicKeyAlgorithm) + return nil, fmt.Errorf("cannot generate public key from private key with id: %v and algorithm: %v", privKey.ID(), privKey.Algorithm()) } return x509PublicKey, nil } -// Initialize creates a new repository by using rootKey as the root Key for the -// TUF repository. The server must be reachable (and is asked to generate a -// timestamp key and possibly other serverManagedRoles), but the created repository -// result is only stored on local disk, not published to the server. To do that, -// use r.Publish() eventually. -func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error { - privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs)) - for _, keyID := range rootKeyIDs { - privKey, _, err := r.CryptoService.GetPrivateKey(keyID) - if err != nil { - return err - } - privKeys = append(privKeys, privKey) - } +// GetCryptoService is the getter for the repository's CryptoService +func (r *repository) GetCryptoService() signed.CryptoService { + return r.cryptoService +} + +// initialize initializes the notary repository with a set of rootkeys, root certificates and roles. +func (r *repository) initialize(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error { // currently we only support server managing timestamps and snapshots, and // nothing else - timestamps are always managed by the server, and implicit @@ -210,17 +215,21 @@ func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles .. 
} } - rootKeys := make([]data.PublicKey, 0, len(privKeys)) - for _, privKey := range privKeys { - rootKey, err := rootCertKey(r.gun, privKey) - if err != nil { - return err - } - rootKeys = append(rootKeys, rootKey) + // gets valid public keys corresponding to the rootKeyIDs or generate if necessary + var publicKeys []data.PublicKey + var err error + if len(rootCerts) == 0 { + publicKeys, err = r.createNewPublicKeyFromKeyIDs(rootKeyIDs) + } else { + publicKeys, err = r.publicKeysOfKeyIDs(rootKeyIDs, rootCerts) + } + if err != nil { + return err } + //initialize repo with public keys rootRole, targetsRole, snapshotRole, timestampRole, err := r.initializeRoles( - rootKeys, + publicKeys, locallyManagedKeys, remotelyManagedKeys, ) @@ -228,7 +237,7 @@ func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles .. return err } - r.tufRepo = tuf.NewRepo(r.CryptoService) + r.tufRepo = tuf.NewRepo(r.GetCryptoService()) if err := r.tufRepo.InitRoot( rootRole, @@ -252,9 +261,113 @@ func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles .. return r.saveMetadata(serverManagesSnapshot) } -func (r *NotaryRepository) initializeRoles(rootKeys []data.PublicKey, localRoles, remoteRoles []data.RoleName) ( - root, targets, snapshot, timestamp data.BaseRole, err error) { +// createNewPublicKeyFromKeyIDs generates a set of public keys corresponding to the given list of +// key IDs existing in the repository's CryptoService. +// the public keys returned are ordered to correspond to the keyIDs +func (r *repository) createNewPublicKeyFromKeyIDs(keyIDs []string) ([]data.PublicKey, error) { + publicKeys := []data.PublicKey{} + privKeys, err := getAllPrivKeys(keyIDs, r.GetCryptoService()) + if err != nil { + return nil, err + } + + for _, privKey := range privKeys { + rootKey, err := rootCertKey(r.gun, privKey) + if err != nil { + return nil, err + } + publicKeys = append(publicKeys, rootKey) + } + return publicKeys, nil +} + +// publicKeysOfKeyIDs confirms that the public key and private keys (by Key IDs) forms valid, strictly ordered key pairs +// (eg. keyIDs[0] must match pubKeys[0] and keyIDs[1] must match certs[1] and so on). +// Or throw error when they mismatch. +func (r *repository) publicKeysOfKeyIDs(keyIDs []string, pubKeys []data.PublicKey) ([]data.PublicKey, error) { + if len(keyIDs) != len(pubKeys) { + err := fmt.Errorf("require matching number of keyIDs and public keys but got %d IDs and %d public keys", len(keyIDs), len(pubKeys)) + return nil, err + } + + if err := matchKeyIdsWithPubKeys(r, keyIDs, pubKeys); err != nil { + return nil, fmt.Errorf("could not obtain public key from IDs: %v", err) + } + return pubKeys, nil +} + +// matchKeyIdsWithPubKeys validates that the private keys (represented by their IDs) and the public keys +// forms matching key pairs +func matchKeyIdsWithPubKeys(r *repository, ids []string, pubKeys []data.PublicKey) error { + for i := 0; i < len(ids); i++ { + privKey, _, err := r.GetCryptoService().GetPrivateKey(ids[i]) + if err != nil { + return fmt.Errorf("could not get the private key matching id %v: %v", ids[i], err) + } + + pubKey := pubKeys[i] + err = signed.VerifyPublicKeyMatchesPrivateKey(privKey, pubKey) + if err != nil { + return err + } + } + return nil +} + +// Initialize creates a new repository by using rootKey as the root Key for the +// TUF repository. 
The server must be reachable (and is asked to generate a +// timestamp key and possibly other serverManagedRoles), but the created repository +// result is only stored on local disk, not published to the server. To do that, +// use r.Publish() eventually. +func (r *repository) Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error { + return r.initialize(rootKeyIDs, nil, serverManagedRoles...) +} + +type errKeyNotFound struct{} + +func (errKeyNotFound) Error() string { + return fmt.Sprintf("cannot find matching private key id") +} + +// keyExistsInList returns the id of the private key in ids that matches the public key +// otherwise return empty string +func keyExistsInList(cert data.PublicKey, ids map[string]bool) error { + pubKeyID, err := utils.CanonicalKeyID(cert) + if err != nil { + return fmt.Errorf("failed to obtain the public key id from the given certificate: %v", err) + } + if _, ok := ids[pubKeyID]; ok { + return nil + } + return errKeyNotFound{} +} + +// InitializeWithCertificate initializes the repository with root keys and their corresponding certificates +func (r *repository) InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, + serverManagedRoles ...data.RoleName) error { + + // If we explicitly pass in certificate(s) but not key, then look keys up using certificate + if len(rootKeyIDs) == 0 && len(rootCerts) != 0 { + rootKeyIDs = []string{} + availableRootKeyIDs := make(map[string]bool) + for _, k := range r.GetCryptoService().ListKeys(data.CanonicalRootRole) { + availableRootKeyIDs[k] = true + } + + for _, cert := range rootCerts { + if err := keyExistsInList(cert, availableRootKeyIDs); err != nil { + return fmt.Errorf("error initializing repository with certificate: %v", err) + } + keyID, _ := utils.CanonicalKeyID(cert) + rootKeyIDs = append(rootKeyIDs, keyID) + } + } + return r.initialize(rootKeyIDs, rootCerts, serverManagedRoles...) +} + +func (r *repository) initializeRoles(rootKeys []data.PublicKey, localRoles, remoteRoles []data.RoleName) ( + root, targets, snapshot, timestamp data.BaseRole, err error) { root = data.NewBaseRole( data.CanonicalRootRole, notary.MinThreshold, @@ -266,7 +379,7 @@ func (r *NotaryRepository) initializeRoles(rootKeys []data.PublicKey, localRoles for _, role := range localRoles { // This is currently hardcoding the keys to ECDSA. var key data.PublicKey - key, err = r.CryptoService.Create(role, r.gun, data.ECDSAKey) + key, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey) if err != nil { return } @@ -352,14 +465,13 @@ func addChange(cl changelist.Changelist, c changelist.Change, roles ...data.Role // AddTarget creates new changelist entries to add a target to the given roles // in the repository when the changelist gets applied at publish time. 
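A sketch of the two initialization paths introduced above, assuming the caller already holds a client.Repository and, for the certificate path, a root certificate as a data.PublicKey. Passing certificates without key IDs makes notary look up the matching private keys in its CryptoService, as implemented in InitializeWithCertificate.

package example

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// initRepo initializes a brand-new trust collection. With no root key IDs and
// no certificates, notary reuses an existing root key from its CryptoService
// or generates a fresh ECDSA root key (see getAllPrivKeys).
func initRepo(repo client.Repository) error {
	// Ask the server to manage the snapshot key in addition to the timestamp key.
	return repo.Initialize(nil, data.CanonicalSnapshotRole)
}

// initRepoWithExistingCert initializes using a pre-existing root certificate.
// The matching private key must already be present in the repository's
// CryptoService; otherwise keyExistsInList returns an error.
func initRepoWithExistingCert(repo client.Repository, rootCert data.PublicKey) error {
	return repo.InitializeWithCertificate(nil, []data.PublicKey{rootCert})
}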
// If roles are unspecified, the default role is "targets" -func (r *NotaryRepository) AddTarget(target *Target, roles ...data.RoleName) error { - +func (r *repository) AddTarget(target *Target, roles ...data.RoleName) error { if len(target.Hashes) == 0 { return fmt.Errorf("no hashes specified for target \"%s\"", target.Name) } logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length) - meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes} + meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes, Custom: target.Custom} metaJSON, err := json.Marshal(meta) if err != nil { return err @@ -374,7 +486,7 @@ func (r *NotaryRepository) AddTarget(target *Target, roles ...data.RoleName) err // RemoveTarget creates new changelist entries to remove a target from the given // roles in the repository when the changelist gets applied at publish time. // If roles are unspecified, the default role is "target". -func (r *NotaryRepository) RemoveTarget(targetName string, roles ...data.RoleName) error { +func (r *repository) RemoveTarget(targetName string, roles ...data.RoleName) error { logrus.Debugf("Removing target \"%s\"", targetName) template := changelist.NewTUFChange(changelist.ActionDelete, "", changelist.TypeTargetsTarget, targetName, nil) @@ -389,7 +501,7 @@ func (r *NotaryRepository) RemoveTarget(targetName string, roles ...data.RoleNam // its entries will be strictly shadowed by those in other parts of the "targets/a" // subtree and also the "targets/x" subtree, as we will defer parsing it until // we explicitly reach it in our iteration of the provided list of roles. -func (r *NotaryRepository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) { +func (r *repository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) { if err := r.Update(false); err != nil { return nil, err } @@ -416,6 +528,7 @@ func (r *NotaryRepository) ListTargets(roles ...data.RoleName) ([]*TargetWithRol Name: targetName, Hashes: targetMeta.Hashes, Length: targetMeta.Length, + Custom: targetMeta.Custom, }, Role: validRole.Name, } @@ -441,7 +554,7 @@ func (r *NotaryRepository) ListTargets(roles ...data.RoleName) ([]*TargetWithRol // the target entry found in the subtree of the highest priority role // will be returned. // See the IMPORTANT section on ListTargets above. Those roles also apply here. -func (r *NotaryRepository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) { +func (r *repository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) { if err := r.Update(false); err != nil { return nil, err } @@ -471,10 +584,10 @@ func (r *NotaryRepository) GetTargetByName(name string, roles ...data.RoleName) } // Check that we didn't error, and that we assigned to our target if err := r.tufRepo.WalkTargets(name, role, getTargetVisitorFunc, skipRoles...); err == nil && foundTarget { - return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length}, Role: resultRoleName}, nil + return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Role: resultRoleName}, nil } } - return nil, fmt.Errorf("No trust data for %s", name) + return nil, ErrNoSuchTarget(name) } @@ -485,10 +598,17 @@ type TargetSignedStruct struct { Signatures []data.Signature } +//ErrNoSuchTarget is returned when no valid trust data is found. 
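A sketch showing that the Custom field added above is carried through ListTargets, so consumers can read back the metadata they attached at AddTarget time. Assumes an already-initialized client.Repository.

package example

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
)

// printTargets lists every target visible in the "targets" role and its
// delegations, including the new opaque Custom payload when one is present.
func printTargets(repo client.Repository) error {
	targets, err := repo.ListTargets()
	if err != nil {
		return err
	}
	for _, t := range targets {
		fmt.Printf("%s (role %s, %d bytes)", t.Target.Name, t.Role, t.Target.Length)
		if t.Target.Custom != nil {
			fmt.Printf(" custom=%s", string(*t.Target.Custom))
		}
		fmt.Println()
	}
	return nil
}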
+type ErrNoSuchTarget string + +func (f ErrNoSuchTarget) Error() string { + return fmt.Sprintf("No valid trust data for %s", string(f)) +} + // GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all // roles, and returns a list of TargetSignedStructs for each time it finds the specified target. // If given an empty string for a target name, it will return back all targets signed into the repository in every role -func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) { +func (r *repository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) { if err := r.Update(false); err != nil { return nil, err } @@ -515,7 +635,7 @@ func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSign for targetName, resultMeta := range targetMetaToAdd { targetInfo := TargetSignedStruct{ Role: validRole, - Target: Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length}, + Target: Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Signatures: tgt.Signatures, } targetInfoList = append(targetInfoList, targetInfo) @@ -529,19 +649,19 @@ func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSign return nil, err } if len(targetInfoList) == 0 { - return nil, fmt.Errorf("No valid trust data for %s", name) + return nil, ErrNoSuchTarget(name) } return targetInfoList, nil } // GetChangelist returns the list of the repository's unpublished changes -func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) { +func (r *repository) GetChangelist() (changelist.Changelist, error) { return r.changelist, nil } // getRemoteStore returns the remoteStore of a repository if valid or // or an OfflineStore otherwise -func (r *NotaryRepository) getRemoteStore() store.RemoteStore { +func (r *repository) getRemoteStore() store.RemoteStore { if r.remoteStore != nil { return r.remoteStore } @@ -559,7 +679,7 @@ type RoleWithSignatures struct { // ListRoles returns a list of RoleWithSignatures objects for this repo // This represents the latest metadata for each role in this repo -func (r *NotaryRepository) ListRoles() ([]RoleWithSignatures, error) { +func (r *repository) ListRoles() ([]RoleWithSignatures, error) { // Update to latest repo state if err := r.Update(false); err != nil { return nil, err @@ -598,7 +718,7 @@ func (r *NotaryRepository) ListRoles() ([]RoleWithSignatures, error) { // Publish pushes the local changes in signed material to the remote notary-server // Conceptually it performs an operation similar to a `git rebase` -func (r *NotaryRepository) Publish() error { +func (r *repository) Publish() error { if err := r.publish(r.changelist); err != nil { return err } @@ -613,22 +733,24 @@ func (r *NotaryRepository) Publish() error { // publish pushes the changes in the given changelist to the remote notary-server // Conceptually it performs an operation similar to a `git rebase` -func (r *NotaryRepository) publish(cl changelist.Changelist) error { +func (r *repository) publish(cl changelist.Changelist) error { var initialPublish bool // update first before publishing if err := r.Update(true); err != nil { // If the remote is not aware of the repo, then this is being published - // for the first time. Try to load from disk instead for publishing. + // for the first time. Try to initialize the repository before publishing. 
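A sketch of handling the new typed ErrNoSuchTarget error, which replaces the ad-hoc fmt.Errorf values previously returned by GetTargetByName and GetAllTargetMetadataByName.

package example

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
)

// lookupTarget distinguishes "the target simply is not in the trust data" from
// genuine update or verification failures.
func lookupTarget(repo client.Repository, name string) (*client.TargetWithRole, error) {
	t, err := repo.GetTargetByName(name)
	switch err.(type) {
	case nil:
		return t, nil
	case client.ErrNoSuchTarget:
		// Not an infrastructure failure: the repository is healthy but does
		// not contain this target.
		return nil, fmt.Errorf("%q is not signed into this repository: %v", name, err)
	default:
		return nil, err
	}
}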
if _, ok := err.(ErrRepositoryNotExist); ok { err := r.bootstrapRepo() + if _, ok := err.(store.ErrMetaNotFound); ok { + logrus.Infof("No TUF data found locally or remotely - initializing repository %s for the first time", r.gun.String()) + err = r.Initialize(nil) + } + if err != nil { - logrus.Debugf("Unable to load repository from local files: %s", - err.Error()) - if _, ok := err.(store.ErrMetaNotFound); ok { - return ErrRepoNotInitialized{} - } + logrus.WithError(err).Debugf("Unable to load or initialize repository during first publish: %s", err.Error()) return err } + // Ensure we will push the initial root and targets file. Either or // both of the root and targets may not be marked as Dirty, since // there may not be any changes that update them, so use a @@ -718,7 +840,7 @@ func signRootIfNecessary(updates map[data.RoleName][]byte, repo *tuf.Repo, extra // Fetch back a `legacyVersions` number of roots files, collect the root public keys // This includes old `root` roles as well as legacy versioned root roles, e.g. `1.root` -func (r *NotaryRepository) oldKeysForLegacyClientSupport(legacyVersions int, initialPublish bool) (data.KeyList, error) { +func (r *repository) oldKeysForLegacyClientSupport(legacyVersions int, initialPublish bool) (data.KeyList, error) { if initialPublish { return nil, nil } @@ -808,8 +930,8 @@ func signTargets(updates map[data.RoleName][]byte, repo *tuf.Repo, initialPublis // r.tufRepo. This attempts to load metadata for all roles. Since server // snapshots are supported, if the snapshot metadata fails to load, that's ok. // This assumes that bootstrapRepo is only used by Publish() or RotateKey() -func (r *NotaryRepository) bootstrapRepo() error { - b := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning) +func (r *repository) bootstrapRepo() error { + b := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning) logrus.Debugf("Loading trusted collection.") @@ -839,7 +961,7 @@ func (r *NotaryRepository) bootstrapRepo() error { // saveMetadata saves contents of r.tufRepo onto the local disk, creating // signatures as necessary, possibly prompting for passphrases. -func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error { +func (r *repository) saveMetadata(ignoreSnapshot bool) error { logrus.Debugf("Saving changes to Trusted Collection.") rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole, nil) @@ -883,7 +1005,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error { // returns a properly constructed ErrRepositoryNotExist error based on this // repo's information -func (r *NotaryRepository) errRepositoryNotExist() error { +func (r *repository) errRepositoryNotExist() error { host := r.baseURL parsed, err := url.Parse(r.baseURL) if err == nil { @@ -894,7 +1016,7 @@ func (r *NotaryRepository) errRepositoryNotExist() error { // Update bootstraps a trust anchor (root.json) before updating all the // metadata from the repo. -func (r *NotaryRepository) Update(forWrite bool) error { +func (r *repository) Update(forWrite bool) error { c, err := r.bootstrapClient(forWrite) if err != nil { if _, ok := err.(store.ErrMetaNotFound); ok { @@ -940,14 +1062,14 @@ func (r *NotaryRepository) Update(forWrite bool) error { // // Returns a TUFClient for the remote server, which may not be actually // operational (if the URL is invalid but a root.json is cached). 
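With the change above, a first Publish against a repository that has no local or remote TUF data now initializes it automatically instead of failing with ErrRepoNotInitialized. A minimal sketch, assuming an existing client.Repository and a *client.Target built with NewTarget:

package example

import (
	"github.com/theupdateframework/notary/client"
)

// addAndPublish stages a target and publishes. On the very first publish of a
// brand-new GUN, publish() detects store.ErrMetaNotFound from bootstrapRepo
// and calls Initialize(nil) before pushing the changelist.
func addAndPublish(repo client.Repository, target *client.Target) error {
	// The default role is "targets" when no roles are given.
	if err := repo.AddTarget(target); err != nil {
		return err
	}
	return repo.Publish()
}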
-func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, error) { +func (r *repository) bootstrapClient(checkInitialized bool) (*tufClient, error) { minVersion := 1 // the old root on disk should not be validated against any trust pinning configuration // because if we have an old root, it itself is the thing that pins trust - oldBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{}) + oldBuilder := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), trustpinning.TrustPinConfig{}) // by default, we want to use the trust pinning configuration on any new root that we download - newBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning) + newBuilder := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning) // Try to read root from cache first. We will trust this root until we detect a problem // during update which will cause us to download a new root and perform a rotation. @@ -961,7 +1083,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, e // again, the root on disk is the source of trust pinning, so use an empty trust // pinning configuration - newBuilder = tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{}) + newBuilder = tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), trustpinning.TrustPinConfig{}) if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil { // Ok, the old root is expired - we want to download a new one. But we want to use the @@ -1007,7 +1129,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, e return nil, ErrRepoNotInitialized{} } - return NewTUFClient(oldBuilder, newBuilder, remote, r.cache), nil + return newTufClient(oldBuilder, newBuilder, remote, r.cache), nil } // RotateKey removes all existing keys associated with the role. If no keys are @@ -1015,7 +1137,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, e // managing the key to the server. If key(s) are specified by keyList, then they are // used for signing the role. // These changes are staged in a changelist until publish is called. 
-func (r *NotaryRepository) RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error { +func (r *repository) RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error { if err := checkRotationInput(role, serverManagesKey); err != nil { return err } @@ -1033,7 +1155,7 @@ func (r *NotaryRepository) RotateKey(role data.RoleName, serverManagesKey bool, } // Given a set of new keys to rotate to and a set of keys to drop, returns the list of current keys to use -func (r *NotaryRepository) pubKeyListForRotation(role data.RoleName, serverManaged bool, newKeys []string) (pubKeyList data.KeyList, err error) { +func (r *repository) pubKeyListForRotation(role data.RoleName, serverManaged bool, newKeys []string) (pubKeyList data.KeyList, err error) { var pubKey data.PublicKey // If server manages the key being rotated, request a rotation and return the new key @@ -1051,7 +1173,7 @@ func (r *NotaryRepository) pubKeyListForRotation(role data.RoleName, serverManag // If no new keys are passed in, we generate one if len(newKeys) == 0 { pubKeyList = make(data.KeyList, 0, 1) - pubKey, err = r.CryptoService.Create(role, r.gun, data.ECDSAKey) + pubKey, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey) pubKeyList = append(pubKeyList, pubKey) } if err != nil { @@ -1062,7 +1184,7 @@ func (r *NotaryRepository) pubKeyListForRotation(role data.RoleName, serverManag if len(newKeys) > 0 { pubKeyList = make(data.KeyList, 0, len(newKeys)) for _, keyID := range newKeys { - pubKey = r.CryptoService.GetKey(keyID) + pubKey = r.GetCryptoService().GetKey(keyID) if pubKey == nil { return nil, fmt.Errorf("unable to find key: %s", keyID) } @@ -1078,14 +1200,14 @@ func (r *NotaryRepository) pubKeyListForRotation(role data.RoleName, serverManag return pubKeyList, nil } -func (r *NotaryRepository) pubKeysToCerts(role data.RoleName, pubKeyList data.KeyList) (data.KeyList, error) { +func (r *repository) pubKeysToCerts(role data.RoleName, pubKeyList data.KeyList) (data.KeyList, error) { // only generate certs for root keys if role != data.CanonicalRootRole { return pubKeyList, nil } for i, pubKey := range pubKeyList { - privKey, loadedRole, err := r.CryptoService.GetPrivateKey(pubKey.ID()) + privKey, loadedRole, err := r.GetCryptoService().GetPrivateKey(pubKey.ID()) if err != nil { return nil, err } @@ -1119,7 +1241,7 @@ func checkRotationInput(role data.RoleName, serverManaged bool) error { return nil } -func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role data.RoleName, action string, keyList []data.PublicKey) error { +func (r *repository) rootFileKeyChange(cl changelist.Changelist, role data.RoleName, action string, keyList []data.PublicKey) error { meta := changelist.TUFRootData{ RoleName: role, Keys: keyList, @@ -1147,7 +1269,7 @@ func DeleteTrustData(baseDir string, gun data.GUN, URL string, rt http.RoundTrip if err := os.RemoveAll(localRepo); err != nil { return fmt.Errorf("error clearing TUF repo data: %v", err) } - // Note that this will require admin permission in this NotaryRepository's roundtripper + // Note that this will require admin permission for the gun in the roundtripper if deleteRemote { remote, err := getRemoteStore(URL, gun, rt) if err != nil { @@ -1160,3 +1282,9 @@ func DeleteTrustData(baseDir string, gun data.GUN, URL string, rt http.RoundTrip } return nil } + +// SetLegacyVersions allows the number of legacy versions of the root +// to be inspected for old signing keys to be configured. 
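A sketch of key rotation through the renamed repository type, using only the exported Repository surface. Rotating the snapshot key to be server-managed is the case described in the RotateKey documentation above; the change is staged in the changelist and only takes effect on Publish.

package example

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// handSnapshotKeyToServer drops the locally managed snapshot key and asks the
// notary server to manage snapshot signing from now on.
func handSnapshotKeyToServer(repo client.Repository) error {
	if err := repo.RotateKey(data.CanonicalSnapshotRole, true, nil); err != nil {
		return err
	}
	// The rotation is only a staged change until it is published.
	return repo.Publish()
}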
+func (r *repository) SetLegacyVersions(n int) { + r.LegacyVersions = n +} diff --git a/src/vendor/github.com/docker/notary/client/delegations.go b/src/vendor/github.com/theupdateframework/notary/client/delegations.go similarity index 87% rename from src/vendor/github.com/docker/notary/client/delegations.go rename to src/vendor/github.com/theupdateframework/notary/client/delegations.go index d32c558fc..289654e22 100644 --- a/src/vendor/github.com/docker/notary/client/delegations.go +++ b/src/vendor/github.com/theupdateframework/notary/client/delegations.go @@ -4,17 +4,17 @@ import ( "encoding/json" "fmt" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - "github.com/docker/notary/client/changelist" - store "github.com/docker/notary/storage" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client/changelist" + store "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) // AddDelegation creates changelist entries to add provided delegation public keys and paths. // This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist if called). -func (r *NotaryRepository) AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error { +func (r *repository) AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error { if len(delegationKeys) > 0 { err := r.AddDelegationRoleAndKeys(name, delegationKeys) if err != nil { @@ -33,7 +33,7 @@ func (r *NotaryRepository) AddDelegation(name data.RoleName, delegationKeys []da // AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys. // This method is the simplest way to create a new delegation, because the delegation must have at least // one key upon creation to be valid since we will reject the changelist while validating the threshold. -func (r *NotaryRepository) AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error { +func (r *repository) AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} @@ -57,7 +57,7 @@ func (r *NotaryRepository) AddDelegationRoleAndKeys(name data.RoleName, delegati // AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation. // This method cannot create a new delegation itself because the role must meet the key threshold upon creation. -func (r *NotaryRepository) AddDelegationPaths(name data.RoleName, paths []string) error { +func (r *repository) AddDelegationPaths(name data.RoleName, paths []string) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} @@ -78,7 +78,7 @@ func (r *NotaryRepository) AddDelegationPaths(name data.RoleName, paths []string // RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and paths. // This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist if called). 
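A sketch of the new SetLegacyVersions setter. Per oldKeysForLegacyClientSupport above, a positive count makes Publish look back that many previous root versions for old root signing keys, so clients still pinned to an older root can validate the newly published one. The value 2 below is illustrative, not a recommendation.

package example

import (
	"github.com/theupdateframework/notary/client"
)

// publishForLegacyClients asks the client to also consider root keys from up
// to two previous root versions when signing new root metadata.
func publishForLegacyClients(repo client.Repository) error {
	repo.SetLegacyVersions(2) // illustrative count
	return repo.Publish()
}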
-func (r *NotaryRepository) RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error { +func (r *repository) RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error { if len(paths) > 0 { err := r.RemoveDelegationPaths(name, paths) if err != nil { @@ -95,7 +95,7 @@ func (r *NotaryRepository) RemoveDelegationKeysAndPaths(name data.RoleName, keyI } // RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the role in its entirety. -func (r *NotaryRepository) RemoveDelegationRole(name data.RoleName) error { +func (r *repository) RemoveDelegationRole(name data.RoleName) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} @@ -108,7 +108,7 @@ func (r *NotaryRepository) RemoveDelegationRole(name data.RoleName) error { } // RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation. -func (r *NotaryRepository) RemoveDelegationPaths(name data.RoleName, paths []string) error { +func (r *repository) RemoveDelegationPaths(name data.RoleName, paths []string) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} @@ -132,7 +132,7 @@ func (r *NotaryRepository) RemoveDelegationPaths(name data.RoleName, paths []str // the role itself will be deleted in its entirety. // It can also delete a key from all delegations under a parent using a name // with a wildcard at the end. -func (r *NotaryRepository) RemoveDelegationKeys(name data.RoleName, keyIDs []string) error { +func (r *repository) RemoveDelegationKeys(name data.RoleName, keyIDs []string) error { if !data.IsDelegation(name) && !data.IsWildDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} @@ -152,7 +152,7 @@ func (r *NotaryRepository) RemoveDelegationKeys(name data.RoleName, keyIDs []str } // ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation. 
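A sketch of delegation management against the renamed receiver, grounded in the AddDelegation and RemoveDelegationRole signatures above. The role name is illustrative, the collaborator key is assumed to be supplied by the caller, and the empty-string path follows the prefix convention used elsewhere in this patch (an empty prefix matches all paths).

package example

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// grantReleases delegates signing under "targets/releases" to the
// collaborator's public key, then publishes the staged changes.
func grantReleases(repo client.Repository, collaboratorKey data.PublicKey) error {
	role := data.RoleName("targets/releases")
	if err := repo.AddDelegation(role, []data.PublicKey{collaboratorKey}, []string{""}); err != nil {
		return err
	}
	return repo.Publish()
}

// revokeReleases removes the delegation role entirely (keys and paths).
func revokeReleases(repo client.Repository) error {
	if err := repo.RemoveDelegationRole(data.RoleName("targets/releases")); err != nil {
		return err
	}
	return repo.Publish()
}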
-func (r *NotaryRepository) ClearDelegationPaths(name data.RoleName) error { +func (r *repository) ClearDelegationPaths(name data.RoleName) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} @@ -203,7 +203,7 @@ func newDeleteDelegationChange(name data.RoleName, content []byte) *changelist.T // GetDelegationRoles returns the keys and roles of the repository's delegations // Also converts key IDs to canonical key IDs to keep consistent with signing prompts -func (r *NotaryRepository) GetDelegationRoles() ([]data.Role, error) { +func (r *repository) GetDelegationRoles() ([]data.Role, error) { // Update state of the repo to latest if err := r.Update(false); err != nil { return nil, err diff --git a/src/vendor/github.com/docker/notary/client/errors.go b/src/vendor/github.com/theupdateframework/notary/client/errors.go similarity index 96% rename from src/vendor/github.com/docker/notary/client/errors.go rename to src/vendor/github.com/theupdateframework/notary/client/errors.go index 4c765dace..a2d4970ea 100644 --- a/src/vendor/github.com/docker/notary/client/errors.go +++ b/src/vendor/github.com/theupdateframework/notary/client/errors.go @@ -2,7 +2,8 @@ package client import ( "fmt" - "github.com/docker/notary/tuf/data" + + "github.com/theupdateframework/notary/tuf/data" ) // ErrRepoNotInitialized is returned when trying to publish an uninitialized diff --git a/src/vendor/github.com/docker/notary/client/helpers.go b/src/vendor/github.com/theupdateframework/notary/client/helpers.go similarity index 84% rename from src/vendor/github.com/docker/notary/client/helpers.go rename to src/vendor/github.com/theupdateframework/notary/client/helpers.go index 5ec0384e2..179d27ecb 100644 --- a/src/vendor/github.com/docker/notary/client/helpers.go +++ b/src/vendor/github.com/theupdateframework/notary/client/helpers.go @@ -6,12 +6,13 @@ import ( "net/http" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/notary/client/changelist" - store "github.com/docker/notary/storage" - "github.com/docker/notary/tuf" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/client/changelist" + store "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/tuf" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" + "github.com/theupdateframework/notary/tuf/utils" ) // Use this to initialize remote HTTPStores from the config settings @@ -268,3 +269,38 @@ func serializeCanonicalRole(tufRepo *tuf.Repo, role data.RoleName, extraSigningK return json.Marshal(s) } + +func getAllPrivKeys(rootKeyIDs []string, cryptoService signed.CryptoService) ([]data.PrivateKey, error) { + if cryptoService == nil { + return nil, fmt.Errorf("no crypto service available to get private keys from") + } + + privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs)) + for _, keyID := range rootKeyIDs { + privKey, _, err := cryptoService.GetPrivateKey(keyID) + if err != nil { + return nil, err + } + privKeys = append(privKeys, privKey) + } + if len(privKeys) == 0 { + var rootKeyID string + rootKeyList := cryptoService.ListKeys(data.CanonicalRootRole) + if len(rootKeyList) == 0 { + rootPublicKey, err := cryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return nil, err + } + rootKeyID = rootPublicKey.ID() + } else { + rootKeyID = rootKeyList[0] + } + privKey, _, err := 
cryptoService.GetPrivateKey(rootKeyID) + if err != nil { + return nil, err + } + privKeys = append(privKeys, privKey) + } + + return privKeys, nil +} diff --git a/src/vendor/github.com/theupdateframework/notary/client/interface.go b/src/vendor/github.com/theupdateframework/notary/client/interface.go new file mode 100644 index 000000000..4e6680dc0 --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/client/interface.go @@ -0,0 +1,47 @@ +package client + +import ( + "github.com/theupdateframework/notary/client/changelist" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" +) + +// Repository represents the set of options that must be supported over a TUF repo. +type Repository interface { + // General management operations + Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error + InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error + Publish() error + + // Target Operations + AddTarget(target *Target, roles ...data.RoleName) error + RemoveTarget(targetName string, roles ...data.RoleName) error + ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) + GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) + GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) + + // Changelist operations + GetChangelist() (changelist.Changelist, error) + + // Role operations + ListRoles() ([]RoleWithSignatures, error) + GetDelegationRoles() ([]data.Role, error) + AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error + AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error + AddDelegationPaths(name data.RoleName, paths []string) error + RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error + RemoveDelegationRole(name data.RoleName) error + RemoveDelegationPaths(name data.RoleName, paths []string) error + RemoveDelegationKeys(name data.RoleName, keyIDs []string) error + ClearDelegationPaths(name data.RoleName) error + + // Witness and other re-signing operations + Witness(roles ...data.RoleName) ([]data.RoleName, error) + + // Key Operations + RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error + + GetCryptoService() signed.CryptoService + SetLegacyVersions(int) + GetGUN() data.GUN +} diff --git a/src/vendor/github.com/docker/notary/client/repo.go b/src/vendor/github.com/theupdateframework/notary/client/repo.go similarity index 80% rename from src/vendor/github.com/docker/notary/client/repo.go rename to src/vendor/github.com/theupdateframework/notary/client/repo.go index 953fda10c..cf2242b77 100644 --- a/src/vendor/github.com/docker/notary/client/repo.go +++ b/src/vendor/github.com/theupdateframework/notary/client/repo.go @@ -5,8 +5,8 @@ package client import ( "fmt" - "github.com/docker/notary" - "github.com/docker/notary/trustmanager" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/trustmanager" ) func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) { diff --git a/src/vendor/github.com/docker/notary/client/repo_pkcs11.go b/src/vendor/github.com/theupdateframework/notary/client/repo_pkcs11.go similarity index 78% rename from src/vendor/github.com/docker/notary/client/repo_pkcs11.go rename to src/vendor/github.com/theupdateframework/notary/client/repo_pkcs11.go index 3eccc2f7f..a24d3e604 100644 --- 
a/src/vendor/github.com/docker/notary/client/repo_pkcs11.go +++ b/src/vendor/github.com/theupdateframework/notary/client/repo_pkcs11.go @@ -5,9 +5,9 @@ package client import ( "fmt" - "github.com/docker/notary" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/trustmanager/yubikey" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/trustmanager/yubikey" ) func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) { diff --git a/src/vendor/github.com/docker/notary/client/tufclient.go b/src/vendor/github.com/theupdateframework/notary/client/tufclient.go similarity index 89% rename from src/vendor/github.com/docker/notary/client/tufclient.go rename to src/vendor/github.com/theupdateframework/notary/client/tufclient.go index c72119eb7..17be93056 100644 --- a/src/vendor/github.com/docker/notary/client/tufclient.go +++ b/src/vendor/github.com/theupdateframework/notary/client/tufclient.go @@ -4,26 +4,26 @@ import ( "encoding/json" "fmt" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - store "github.com/docker/notary/storage" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + store "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/trustpinning" + "github.com/theupdateframework/notary/tuf" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" ) -// TUFClient is a usability wrapper around a raw TUF repo -type TUFClient struct { +// tufClient is a usability wrapper around a raw TUF repo +type tufClient struct { remote store.RemoteStore cache store.MetadataStore oldBuilder tuf.RepoBuilder newBuilder tuf.RepoBuilder } -// NewTUFClient initialized a TUFClient with the given repo, remote source of content, and cache -func NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *TUFClient { - return &TUFClient{ +// newTufClient initialized a tufClient with the given repo, remote source of content, and cache +func newTufClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *tufClient { + return &tufClient{ oldBuilder: oldBuilder, newBuilder: newBuilder, remote: remote, @@ -32,7 +32,7 @@ func NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteSto } // Update performs an update to the TUF repo as defined by the TUF spec -func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) { +func (c *tufClient) Update() (*tuf.Repo, *tuf.Repo, error) { // 1. Get timestamp // a. If timestamp error (verification, expired, etc...) download new root and return to 1. // 2. Check if local snapshot is up to date @@ -63,7 +63,7 @@ func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) { return c.newBuilder.Finish() } -func (c *TUFClient) update() error { +func (c *tufClient) update() error { if err := c.downloadTimestamp(); err != nil { logrus.Debugf("Client Update (Timestamp): %s", err.Error()) return err @@ -82,7 +82,7 @@ func (c *TUFClient) update() error { // updateRoot checks if there is a newer version of the root available, and if so // downloads all intermediate root files to allow proper key rotation. 
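Since TUFClient and NewTUFClient are unexported above, outside callers are expected to program against the exported Repository interface from interface.go. A sketch, assuming TargetSignedStruct.Role carries the delegation role name as shown where the struct is populated earlier in client.go:

package example

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
)

// whoSigned reports every role that has signed the named target, using only
// the exported Repository interface.
func whoSigned(repo client.Repository, name string) error {
	infos, err := repo.GetAllTargetMetadataByName(name)
	if err != nil {
		return err
	}
	for _, info := range infos {
		fmt.Printf("%s is signed into role %s with %d signature(s)\n",
			info.Target.Name, info.Role.Name, len(info.Signatures))
	}
	return nil
}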
-func (c *TUFClient) updateRoot() error { +func (c *tufClient) updateRoot() error { // Get current root version currentRootConsistentInfo := c.oldBuilder.GetConsistentInfo(data.CanonicalRootRole) currentVersion := c.oldBuilder.GetLoadedVersion(currentRootConsistentInfo.RoleName) @@ -147,7 +147,7 @@ func (c *TUFClient) updateRoot() error { // updateRootVersions updates the root from it's current version to a target, rotating keys // as they are found -func (c *TUFClient) updateRootVersions(fromVersion, toVersion int) error { +func (c *tufClient) updateRootVersions(fromVersion, toVersion int) error { for v := fromVersion; v <= toVersion; v++ { logrus.Debugf("updating root from version %d to version %d, currently fetching %d", fromVersion, toVersion, v) @@ -170,7 +170,7 @@ func (c *TUFClient) updateRootVersions(fromVersion, toVersion int) error { // downloadTimestamp is responsible for downloading the timestamp.json // Timestamps are special in that we ALWAYS attempt to download and only // use cache if the download fails (and the cache is still valid). -func (c *TUFClient) downloadTimestamp() error { +func (c *tufClient) downloadTimestamp() error { logrus.Debug("Loading timestamp...") role := data.CanonicalTimestampRole consistentInfo := c.newBuilder.GetConsistentInfo(role) @@ -206,7 +206,7 @@ func (c *TUFClient) downloadTimestamp() error { } // downloadSnapshot is responsible for downloading the snapshot.json -func (c *TUFClient) downloadSnapshot() error { +func (c *tufClient) downloadSnapshot() error { logrus.Debug("Loading snapshot...") role := data.CanonicalSnapshotRole consistentInfo := c.newBuilder.GetConsistentInfo(role) @@ -218,7 +218,7 @@ func (c *TUFClient) downloadSnapshot() error { // downloadTargets downloads all targets and delegated targets for the repository. // It uses a pre-order tree traversal as it's necessary to download parents first // to obtain the keys to validate children. 
-func (c *TUFClient) downloadTargets() error { +func (c *tufClient) downloadTargets() error { toDownload := []data.DelegationRole{{ BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole}, Paths: []string{""}, @@ -251,7 +251,7 @@ func (c *TUFClient) downloadTargets() error { return nil } -func (c TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) { +func (c tufClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) { logrus.Debugf("Loading %s...", role.Name) tgs := &data.SignedTargets{} @@ -267,7 +267,7 @@ func (c TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInf } // downloadRoot is responsible for downloading the root.json -func (c *TUFClient) downloadRoot() ([]byte, error) { +func (c *tufClient) downloadRoot() ([]byte, error) { role := data.CanonicalRootRole consistentInfo := c.newBuilder.GetConsistentInfo(role) @@ -284,7 +284,7 @@ func (c *TUFClient) downloadRoot() ([]byte, error) { return c.tryLoadCacheThenRemote(consistentInfo) } -func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) { +func (c *tufClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) { cachedTS, err := c.cache.GetSized(consistentInfo.RoleName.String(), consistentInfo.Length()) if err != nil { logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName) @@ -300,7 +300,7 @@ func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([ return c.tryLoadRemote(consistentInfo, cachedTS) } -func (c *TUFClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) { +func (c *tufClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) { consistentName := consistentInfo.ConsistentName() raw, err := c.remote.GetSized(consistentName, consistentInfo.Length()) if err != nil { diff --git a/src/vendor/github.com/docker/notary/client/witness.go b/src/vendor/github.com/theupdateframework/notary/client/witness.go similarity index 88% rename from src/vendor/github.com/docker/notary/client/witness.go rename to src/vendor/github.com/theupdateframework/notary/client/witness.go index 72aed031c..ea6caa1b6 100644 --- a/src/vendor/github.com/docker/notary/client/witness.go +++ b/src/vendor/github.com/theupdateframework/notary/client/witness.go @@ -1,14 +1,14 @@ package client import ( - "github.com/docker/notary/client/changelist" - "github.com/docker/notary/tuf" - "github.com/docker/notary/tuf/data" + "github.com/theupdateframework/notary/client/changelist" + "github.com/theupdateframework/notary/tuf" + "github.com/theupdateframework/notary/tuf/data" ) // Witness creates change objects to witness (i.e. re-sign) the given // roles on the next publish. 
One change is created per role -func (r *NotaryRepository) Witness(roles ...data.RoleName) ([]data.RoleName, error) { +func (r *repository) Witness(roles ...data.RoleName) ([]data.RoleName, error) { var err error successful := make([]data.RoleName, 0, len(roles)) for _, role := range roles { diff --git a/src/vendor/github.com/docker/notary/codecov.yml b/src/vendor/github.com/theupdateframework/notary/codecov.yml similarity index 100% rename from src/vendor/github.com/docker/notary/codecov.yml rename to src/vendor/github.com/theupdateframework/notary/codecov.yml diff --git a/src/vendor/github.com/docker/notary/const.go b/src/vendor/github.com/theupdateframework/notary/const.go similarity index 100% rename from src/vendor/github.com/docker/notary/const.go rename to src/vendor/github.com/theupdateframework/notary/const.go diff --git a/src/vendor/github.com/docker/notary/const_nowindows.go b/src/vendor/github.com/theupdateframework/notary/const_nowindows.go similarity index 100% rename from src/vendor/github.com/docker/notary/const_nowindows.go rename to src/vendor/github.com/theupdateframework/notary/const_nowindows.go diff --git a/src/vendor/github.com/docker/notary/const_windows.go b/src/vendor/github.com/theupdateframework/notary/const_windows.go similarity index 100% rename from src/vendor/github.com/docker/notary/const_windows.go rename to src/vendor/github.com/theupdateframework/notary/const_windows.go diff --git a/src/vendor/github.com/docker/notary/cross.Dockerfile b/src/vendor/github.com/theupdateframework/notary/cross.Dockerfile similarity index 89% rename from src/vendor/github.com/docker/notary/cross.Dockerfile rename to src/vendor/github.com/theupdateframework/notary/cross.Dockerfile index 0df9e53f3..5ef9aebe8 100644 --- a/src/vendor/github.com/docker/notary/cross.Dockerfile +++ b/src/vendor/github.com/theupdateframework/notary/cross.Dockerfile @@ -1,8 +1,9 @@ -FROM golang:1.7.3 +FROM golang:1.10.1 RUN apt-get update && apt-get install -y \ curl \ clang \ + file \ libltdl-dev \ libsqlite3-dev \ patch \ @@ -19,7 +20,7 @@ RUN useradd -ms /bin/bash notary \ # Configure the container for OSX cross compilation ENV OSX_SDK MacOSX10.11.sdk -ENV OSX_CROSS_COMMIT 8aa9b71a394905e6c5f4b59e2b97b87a004658a4 +ENV OSX_CROSS_COMMIT 1a1733a773fe26e7b6c93b16fbf9341f22fac831 RUN set -x \ && export OSXCROSS_PATH="/osxcross" \ && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ @@ -28,7 +29,7 @@ RUN set -x \ && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh > /dev/null ENV PATH /osxcross/target/bin:$PATH -ENV NOTARYDIR /go/src/github.com/docker/notary +ENV NOTARYDIR /go/src/github.com/theupdateframework/notary COPY . 
${NOTARYDIR} RUN chmod -R a+rw /go diff --git a/src/vendor/github.com/docker/notary/cryptoservice/certificate.go b/src/vendor/github.com/theupdateframework/notary/cryptoservice/certificate.go similarity index 92% rename from src/vendor/github.com/docker/notary/cryptoservice/certificate.go rename to src/vendor/github.com/theupdateframework/notary/cryptoservice/certificate.go index 26de51039..0270e89fb 100644 --- a/src/vendor/github.com/docker/notary/cryptoservice/certificate.go +++ b/src/vendor/github.com/theupdateframework/notary/cryptoservice/certificate.go @@ -7,8 +7,8 @@ import ( "fmt" "time" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) // GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval diff --git a/src/vendor/github.com/docker/notary/cryptoservice/crypto_service.go b/src/vendor/github.com/theupdateframework/notary/cryptoservice/crypto_service.go similarity index 75% rename from src/vendor/github.com/docker/notary/cryptoservice/crypto_service.go rename to src/vendor/github.com/theupdateframework/notary/cryptoservice/crypto_service.go index d8f42535c..a558304f1 100644 --- a/src/vendor/github.com/docker/notary/cryptoservice/crypto_service.go +++ b/src/vendor/github.com/theupdateframework/notary/cryptoservice/crypto_service.go @@ -1,17 +1,16 @@ package cryptoservice import ( - "crypto/rand" - "fmt" - "crypto/x509" "encoding/pem" "errors" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "fmt" + + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) var ( @@ -37,42 +36,18 @@ func NewCryptoService(keyStores ...trustmanager.KeyStore) *CryptoService { // Create is used to generate keys for targets, snapshots and timestamps func (cs *CryptoService) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) { - var privKey data.PrivateKey - var err error + if algorithm == data.RSAKey { + return nil, fmt.Errorf("%s keys can only be imported", data.RSAKey) + } - switch algorithm { - case data.RSAKey: - privKey, err = utils.GenerateRSAKey(rand.Reader, notary.MinRSABitSize) - if err != nil { - return nil, fmt.Errorf("failed to generate RSA key: %v", err) - } - case data.ECDSAKey: - privKey, err = utils.GenerateECDSAKey(rand.Reader) - if err != nil { - return nil, fmt.Errorf("failed to generate EC key: %v", err) - } - case data.ED25519Key: - privKey, err = utils.GenerateED25519Key(rand.Reader) - if err != nil { - return nil, fmt.Errorf("failed to generate ED25519 key: %v", err) - } - default: - return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm) + privKey, err := utils.GenerateKey(algorithm) + if err != nil { + return nil, fmt.Errorf("failed to generate %s key: %v", algorithm, err) } logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role.String(), privKey.ID()) + pubKey := data.PublicKeyFromPrivate(privKey) - // Store the private key into our keystore - for _, ks := range cs.keyStores { - err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, privKey) - if err == nil { - return data.PublicKeyFromPrivate(privKey), nil - } - } 
- if err != nil { - return nil, fmt.Errorf("failed to add key to filestore: %v", err) - } - - return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons") + return pubKey, cs.AddKey(role, gun, privKey) } // GetPrivateKey returns a private key and role if present by ID. @@ -173,9 +148,12 @@ func CheckRootKeyIsEncrypted(pemBytes []byte) error { return ErrNoValidPrivateKey } - if !x509.IsEncryptedPEMBlock(block) { - return ErrRootKeyNotEncrypted + if block.Type == "ENCRYPTED PRIVATE KEY" { + return nil + } + if !notary.FIPSEnabled() && x509.IsEncryptedPEMBlock(block) { + return nil } - return nil + return ErrRootKeyNotEncrypted } diff --git a/src/vendor/github.com/docker/notary/development.mysql.yml b/src/vendor/github.com/theupdateframework/notary/development.mysql.yml similarity index 97% rename from src/vendor/github.com/docker/notary/development.mysql.yml rename to src/vendor/github.com/theupdateframework/notary/development.mysql.yml index 7ed45737e..023a77455 100644 --- a/src/vendor/github.com/docker/notary/development.mysql.yml +++ b/src/vendor/github.com/theupdateframework/notary/development.mysql.yml @@ -33,7 +33,7 @@ services: - mdb volumes: - ./notarysql/mysql-initdb.d:/docker-entrypoint-initdb.d - image: mariadb:10.1.10 + image: mariadb:10.1.28 environment: - TERM=dumb - MYSQL_ALLOW_EMPTY_PASSWORD="true" diff --git a/src/vendor/github.com/docker/notary/development.postgresql.yml b/src/vendor/github.com/theupdateframework/notary/development.postgresql.yml similarity index 71% rename from src/vendor/github.com/docker/notary/development.postgresql.yml rename to src/vendor/github.com/theupdateframework/notary/development.postgresql.yml index 17dfd0297..2106b3770 100644 --- a/src/vendor/github.com/docker/notary/development.postgresql.yml +++ b/src/vendor/github.com/theupdateframework/notary/development.postgresql.yml @@ -14,7 +14,7 @@ services: command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.postgres.json" environment: MIGRATIONS_PATH: migrations/server/postgresql - DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=disable + DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server-key.pem depends_on: - postgresql - signer @@ -31,7 +31,7 @@ services: command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.postgres.json" environment: MIGRATIONS_PATH: migrations/signer/postgresql - DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=disable + DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer-key.pem depends_on: - postgresql postgresql: @@ -40,6 +40,7 @@ services: - mdb volumes: - ./notarysql/postgresql-initdb.d:/docker-entrypoint-initdb.d + command: -l client: build: context: . 
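The rewritten Create above now refuses to generate RSA keys (they can only be imported) and delegates generation to utils.GenerateKey. A sketch of creating a targets key through the crypto service exposed by the new GetCryptoService getter:

package example

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// newTargetsKey generates and stores a fresh ECDSA targets key for the repo's GUN.
func newTargetsKey(repo client.Repository) (data.PublicKey, error) {
	cs := repo.GetCryptoService()

	// Passing data.RSAKey here would now fail with "RSA keys can only be imported".
	pub, err := cs.Create(data.CanonicalTargetsRole, repo.GetGUN(), data.ECDSAKey)
	if err != nil {
		return nil, fmt.Errorf("generating targets key: %v", err)
	}
	return pub, nil
}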
diff --git a/src/vendor/github.com/docker/notary/development.rethink.yml b/src/vendor/github.com/theupdateframework/notary/development.rethink.yml similarity index 100% rename from src/vendor/github.com/docker/notary/development.rethink.yml rename to src/vendor/github.com/theupdateframework/notary/development.rethink.yml diff --git a/src/vendor/github.com/docker/notary/docker-compose.postgresql.yml b/src/vendor/github.com/theupdateframework/notary/docker-compose.postgresql.yml similarity index 69% rename from src/vendor/github.com/docker/notary/docker-compose.postgresql.yml rename to src/vendor/github.com/theupdateframework/notary/docker-compose.postgresql.yml index 8a518255c..4146238e5 100644 --- a/src/vendor/github.com/docker/notary/docker-compose.postgresql.yml +++ b/src/vendor/github.com/theupdateframework/notary/docker-compose.postgresql.yml @@ -14,7 +14,7 @@ services: command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.postgres.json" environment: MIGRATIONS_PATH: migrations/server/postgresql - DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=disable + DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server-key.pem depends_on: - postgresql - signer @@ -31,7 +31,7 @@ services: command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.postgres.json" environment: MIGRATIONS_PATH: migrations/signer/postgresql - DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=disable + DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer-key.pem depends_on: - postgresql postgresql: @@ -43,6 +43,7 @@ services: - notary_data:/var/lib/postgresql ports: - 5432:5432 + command: -l volumes: notary_data: external: false diff --git a/src/vendor/github.com/docker/notary/docker-compose.rethink.yml b/src/vendor/github.com/theupdateframework/notary/docker-compose.rethink.yml similarity index 100% rename from src/vendor/github.com/docker/notary/docker-compose.rethink.yml rename to src/vendor/github.com/theupdateframework/notary/docker-compose.rethink.yml diff --git a/src/vendor/github.com/docker/notary/docker-compose.yml b/src/vendor/github.com/theupdateframework/notary/docker-compose.yml similarity index 97% rename from src/vendor/github.com/docker/notary/docker-compose.yml rename to src/vendor/github.com/theupdateframework/notary/docker-compose.yml index bc77313b2..73bebed80 100644 --- a/src/vendor/github.com/docker/notary/docker-compose.yml +++ b/src/vendor/github.com/theupdateframework/notary/docker-compose.yml @@ -34,7 +34,7 @@ services: volumes: - ./notarysql/mysql-initdb.d:/docker-entrypoint-initdb.d - notary_data:/var/lib/mysql - image: mariadb:10.1.10 + image: mariadb:10.1.28 environment: - TERM=dumb - MYSQL_ALLOW_EMPTY_PASSWORD="true" diff --git a/src/vendor/github.com/docker/notary/escrow.Dockerfile b/src/vendor/github.com/theupdateframework/notary/escrow.Dockerfile similarity index 67% rename from src/vendor/github.com/docker/notary/escrow.Dockerfile rename to 
src/vendor/github.com/theupdateframework/notary/escrow.Dockerfile index da7a03ede..192267e1b 100644 --- a/src/vendor/github.com/docker/notary/escrow.Dockerfile +++ b/src/vendor/github.com/theupdateframework/notary/escrow.Dockerfile @@ -1,7 +1,6 @@ -FROM golang:1.7.3-alpine -MAINTAINER David Lawrence "david.lawrence@docker.com" +FROM golang:1.10.1-alpine -ENV NOTARYPKG github.com/docker/notary +ENV NOTARYPKG github.com/theupdateframework/notary # Copy the local repo to the expected go path COPY . /go/src/${NOTARYPKG} diff --git a/src/vendor/github.com/theupdateframework/notary/fips.go b/src/vendor/github.com/theupdateframework/notary/fips.go new file mode 100644 index 000000000..3f3bc68ef --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/fips.go @@ -0,0 +1,14 @@ +package notary + +import ( + "crypto" + // Need to import md5 so can test availability. + _ "crypto/md5" +) + +// FIPSEnabled returns true if running in FIPS mode. +// If compiled in FIPS mode the md5 hash function is never available +// even when imported. This seems to be the best test we have for it. +func FIPSEnabled() bool { + return !crypto.MD5.Available() +} diff --git a/src/vendor/github.com/docker/notary/notary.go b/src/vendor/github.com/theupdateframework/notary/notary.go similarity index 100% rename from src/vendor/github.com/docker/notary/notary.go rename to src/vendor/github.com/theupdateframework/notary/notary.go diff --git a/src/vendor/github.com/docker/notary/server.Dockerfile b/src/vendor/github.com/theupdateframework/notary/server.Dockerfile similarity index 53% rename from src/vendor/github.com/docker/notary/server.Dockerfile rename to src/vendor/github.com/theupdateframework/notary/server.Dockerfile index 6f9eb95d7..57bc3b275 100644 --- a/src/vendor/github.com/docker/notary/server.Dockerfile +++ b/src/vendor/github.com/theupdateframework/notary/server.Dockerfile @@ -1,18 +1,19 @@ -FROM golang:1.7.3-alpine -MAINTAINER David Lawrence "david.lawrence@docker.com" +FROM golang:1.10.1-alpine -RUN apk add --update git gcc libc-dev && rm -rf /var/cache/apk/* +RUN apk add --update git gcc libc-dev -# Install SQL DB migration tool -RUN go get github.com/mattes/migrate +# Pin to the specific v3.0.0 version +RUN go get -tags 'mysql postgres file' github.com/mattes/migrate/cli && mv /go/bin/cli /go/bin/migrate -ENV NOTARYPKG github.com/docker/notary +ENV NOTARYPKG github.com/theupdateframework/notary # Copy the local repo to the expected go path COPY . 
/go/src/${NOTARYPKG} WORKDIR /go/src/${NOTARYPKG} +RUN chmod 0600 ./fixtures/database/* + ENV SERVICE_NAME=notary_server EXPOSE 4443 @@ -20,7 +21,7 @@ EXPOSE 4443 RUN go install \ -tags pkcs11 \ -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \ - ${NOTARYPKG}/cmd/notary-server && apk del git gcc libc-dev + ${NOTARYPKG}/cmd/notary-server && apk del git gcc libc-dev && rm -rf /var/cache/apk/* ENTRYPOINT [ "notary-server" ] CMD [ "-config=fixtures/server-config-local.json" ] diff --git a/src/vendor/github.com/theupdateframework/notary/server.minimal.Dockerfile b/src/vendor/github.com/theupdateframework/notary/server.minimal.Dockerfile new file mode 100644 index 000000000..06ab349b7 --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/server.minimal.Dockerfile @@ -0,0 +1,37 @@ +FROM golang:1.10.1-alpine AS build-env +RUN apk add --update git gcc libc-dev +# Pin to the specific v3.0.0 version +RUN go get -tags 'mysql postgres file' github.com/mattes/migrate/cli && mv /go/bin/cli /go/bin/migrate + +ENV NOTARYPKG github.com/theupdateframework/notary + +# Copy the local repo to the expected go path +COPY . /go/src/${NOTARYPKG} +WORKDIR /go/src/${NOTARYPKG} + +# Build notary-server +RUN go install \ + -tags pkcs11 \ + -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \ + ${NOTARYPKG}/cmd/notary-server + + +FROM busybox:latest + +# the ln is for compatibility with the docker-compose.yml, making these +# images a straight swap for the those built in the compose file. +RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env + +COPY --from=build-env /go/bin/notary-server /usr/bin/notary-server +COPY --from=build-env /go/bin/migrate /usr/bin/migrate +COPY --from=build-env /lib/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1 +COPY --from=build-env /go/src/github.com/theupdateframework/notary/migrations/ /var/lib/notary/migrations +COPY --from=build-env /go/src/github.com/theupdateframework/notary/fixtures /var/lib/notary/fixtures +RUN chmod 0600 /var/lib/notary/fixtures/database/* + +WORKDIR /var/lib/notary +# SERVICE_NAME needed for migration script +ENV SERVICE_NAME=notary_server +EXPOSE 4443 +ENTRYPOINT [ "/usr/bin/notary-server" ] +CMD [ "-config=/var/lib/notary/fixtures/server-config-local.json" ] diff --git a/src/vendor/github.com/docker/notary/signer.Dockerfile b/src/vendor/github.com/theupdateframework/notary/signer.Dockerfile similarity index 57% rename from src/vendor/github.com/docker/notary/signer.Dockerfile rename to src/vendor/github.com/theupdateframework/notary/signer.Dockerfile index 07ab0fe3a..49f22d23c 100644 --- a/src/vendor/github.com/docker/notary/signer.Dockerfile +++ b/src/vendor/github.com/theupdateframework/notary/signer.Dockerfile @@ -1,18 +1,19 @@ -FROM golang:1.7.3-alpine -MAINTAINER David Lawrence "david.lawrence@docker.com" +FROM golang:1.10.1-alpine -RUN apk add --update git gcc libc-dev && rm -rf /var/cache/apk/* +RUN apk add --update git gcc libc-dev -# Install SQL DB migration tool -RUN go get github.com/mattes/migrate +# Pin to the specific v3.0.0 version +RUN go get -tags 'mysql postgres file' github.com/mattes/migrate/cli && mv /go/bin/cli /go/bin/migrate -ENV NOTARYPKG github.com/docker/notary +ENV NOTARYPKG github.com/theupdateframework/notary # Copy the local repo to the expected go path COPY . 
/go/src/${NOTARYPKG} WORKDIR /go/src/${NOTARYPKG} +RUN chmod 0600 ./fixtures/database/* + ENV SERVICE_NAME=notary_signer ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1" ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword" @@ -21,7 +22,7 @@ ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword" RUN go install \ -tags pkcs11 \ -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \ - ${NOTARYPKG}/cmd/notary-signer && apk del git gcc libc-dev + ${NOTARYPKG}/cmd/notary-signer && apk del git gcc libc-dev && rm -rf /var/cache/apk/* ENTRYPOINT [ "notary-signer" ] CMD [ "-config=fixtures/signer-config-local.json" ] diff --git a/src/vendor/github.com/theupdateframework/notary/signer.minimal.Dockerfile b/src/vendor/github.com/theupdateframework/notary/signer.minimal.Dockerfile new file mode 100644 index 000000000..f24030f90 --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/signer.minimal.Dockerfile @@ -0,0 +1,39 @@ +FROM golang:1.10.1-alpine AS build-env +RUN apk add --update git gcc libc-dev +# Pin to the specific v3.0.0 version +RUN go get -tags 'mysql postgres file' github.com/mattes/migrate/cli && mv /go/bin/cli /go/bin/migrate + +ENV NOTARYPKG github.com/theupdateframework/notary + +# Copy the local repo to the expected go path +COPY . /go/src/${NOTARYPKG} +WORKDIR /go/src/${NOTARYPKG} + +# Build notary-signer +RUN go install \ + -tags pkcs11 \ + -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \ + ${NOTARYPKG}/cmd/notary-signer + + +FROM busybox:latest + +# the ln is for compatibility with the docker-compose.yml, making these +# images a straight swap for the those built in the compose file. 
+RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env + +COPY --from=build-env /go/bin/notary-signer /usr/bin/notary-signer +COPY --from=build-env /go/bin/migrate /usr/bin/migrate +COPY --from=build-env /lib/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1 +COPY --from=build-env /go/src/github.com/theupdateframework/notary/migrations/ /var/lib/notary/migrations +COPY --from=build-env /go/src/github.com/theupdateframework/notary/fixtures /var/lib/notary/fixtures +RUN chmod 0600 /var/lib/notary/fixtures/database/* + +WORKDIR /var/lib/notary +# SERVICE_NAME needed for migration script +ENV SERVICE_NAME=notary_signer +ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1" +ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword" + +ENTRYPOINT [ "/usr/bin/notary-signer" ] +CMD [ "-config=/var/lib/notary/fixtures/signer-config-local.json" ] diff --git a/src/vendor/github.com/docker/notary/storage/errors.go b/src/vendor/github.com/theupdateframework/notary/storage/errors.go similarity index 100% rename from src/vendor/github.com/docker/notary/storage/errors.go rename to src/vendor/github.com/theupdateframework/notary/storage/errors.go diff --git a/src/vendor/github.com/docker/notary/storage/filestore.go b/src/vendor/github.com/theupdateframework/notary/storage/filestore.go similarity index 97% rename from src/vendor/github.com/docker/notary/storage/filestore.go rename to src/vendor/github.com/theupdateframework/notary/storage/filestore.go index 1b82740b8..c150d1ce5 100644 --- a/src/vendor/github.com/docker/notary/storage/filestore.go +++ b/src/vendor/github.com/theupdateframework/notary/storage/filestore.go @@ -10,8 +10,8 @@ import ( "path/filepath" "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" ) // NewFileStore creates a fully configurable file store @@ -63,7 +63,7 @@ func (f *FilesystemStore) moveKeyTo0Dot4Location(file string) { fileDir = strings.TrimPrefix(fileDir, notary.RootKeysSubdir) fileDir = strings.TrimPrefix(fileDir, notary.NonRootKeysSubdir) if fileDir != "" { - block.Headers["gun"] = fileDir[1:] + block.Headers["gun"] = filepath.ToSlash(fileDir[1:]) } if strings.Contains(keyID, "_") { role := strings.Split(keyID, "_")[1] @@ -206,10 +206,7 @@ func (f *FilesystemStore) Set(name string, meta []byte) error { os.RemoveAll(fp) // Write the file to disk - if err = ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms); err != nil { - return err - } - return nil + return ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms) } // RemoveAll clears the existing filestore by removing its base directory diff --git a/src/vendor/github.com/docker/notary/storage/httpstore.go b/src/vendor/github.com/theupdateframework/notary/storage/httpstore.go similarity index 92% rename from src/vendor/github.com/docker/notary/storage/httpstore.go rename to src/vendor/github.com/theupdateframework/notary/storage/httpstore.go index 6b6be8f79..03392d4d1 100644 --- a/src/vendor/github.com/docker/notary/storage/httpstore.go +++ b/src/vendor/github.com/theupdateframework/notary/storage/httpstore.go @@ -3,7 +3,7 @@ // - Response bodies for error codes should be unmarshallable as: // {"errors": [{..., "detail": }]} // else validation error details, etc. will be unparsable. The errors -// should have a github.com/docker/notary/tuf/validation/SerializableError +// should have a github.com/theupdateframework/notary/tuf/validation/SerializableError // in the Details field. 
// If writing your own server, please have a look at // github.com/docker/distribution/registry/api/errcode @@ -22,10 +22,17 @@ import ( "net/url" "path" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/validation" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/validation" +) + +const ( + // MaxErrorResponseSize is the maximum size for an error message - 1KiB + MaxErrorResponseSize int64 = 1 << 10 + // MaxKeySize is the maximum size for a stored TUF key - 256KiB + MaxKeySize = 256 << 10 ) // ErrServerUnavailable indicates an error from the server. code allows us to @@ -104,7 +111,9 @@ type HTTPStore struct { roundTrip http.RoundTripper } -// NewHTTPStore initializes a new store against a URL and a number of configuration options +// NewHTTPStore initializes a new store against a URL and a number of configuration options. +// +// In case of a nil `roundTrip`, a default offline store is used instead. func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) { base, err := url.Parse(baseURL) if err != nil { @@ -126,7 +135,8 @@ func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, round } func tryUnmarshalError(resp *http.Response, defaultError error) error { - bodyBytes, err := ioutil.ReadAll(resp.Body) + b := io.LimitReader(resp.Body, MaxErrorResponseSize) + bodyBytes, err := ioutil.ReadAll(b) if err != nil { return defaultError } @@ -317,7 +327,8 @@ func (s HTTPStore) GetKey(role data.RoleName) ([]byte, error) { if err := translateStatusToError(resp, role.String()+" key"); err != nil { return nil, err } - body, err := ioutil.ReadAll(resp.Body) + b := io.LimitReader(resp.Body, MaxKeySize) + body, err := ioutil.ReadAll(b) if err != nil { return nil, err } @@ -342,7 +353,8 @@ func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) { if err := translateStatusToError(resp, role.String()+" key"); err != nil { return nil, err } - body, err := ioutil.ReadAll(resp.Body) + b := io.LimitReader(resp.Body, MaxKeySize) + body, err := ioutil.ReadAll(b) if err != nil { return nil, err } diff --git a/src/vendor/github.com/docker/notary/storage/interfaces.go b/src/vendor/github.com/theupdateframework/notary/storage/interfaces.go similarity index 95% rename from src/vendor/github.com/docker/notary/storage/interfaces.go rename to src/vendor/github.com/theupdateframework/notary/storage/interfaces.go index c9ac03b60..c008f437a 100644 --- a/src/vendor/github.com/docker/notary/storage/interfaces.go +++ b/src/vendor/github.com/theupdateframework/notary/storage/interfaces.go @@ -1,7 +1,7 @@ package storage import ( - "github.com/docker/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/data" ) // NoSizeLimit is represented as -1 for arguments to GetMeta diff --git a/src/vendor/github.com/docker/notary/storage/memorystore.go b/src/vendor/github.com/theupdateframework/notary/storage/memorystore.go similarity index 96% rename from src/vendor/github.com/docker/notary/storage/memorystore.go rename to src/vendor/github.com/theupdateframework/notary/storage/memorystore.go index b4ae64669..0c92c6994 100644 --- a/src/vendor/github.com/docker/notary/storage/memorystore.go +++ b/src/vendor/github.com/theupdateframework/notary/storage/memorystore.go @@ -5,9 +5,9 @@ import ( "encoding/json" "fmt" - "github.com/docker/notary" - 
"github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) // NewMemoryStore returns a MetadataStore that operates entirely in memory. diff --git a/src/vendor/github.com/docker/notary/storage/offlinestore.go b/src/vendor/github.com/theupdateframework/notary/storage/offlinestore.go similarity index 96% rename from src/vendor/github.com/docker/notary/storage/offlinestore.go rename to src/vendor/github.com/theupdateframework/notary/storage/offlinestore.go index 9a4faf6d4..c5062ae6b 100644 --- a/src/vendor/github.com/docker/notary/storage/offlinestore.go +++ b/src/vendor/github.com/theupdateframework/notary/storage/offlinestore.go @@ -1,7 +1,7 @@ package storage import ( - "github.com/docker/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/data" ) // ErrOffline is used to indicate we are operating offline diff --git a/src/vendor/github.com/docker/notary/trustmanager/errors.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/errors.go similarity index 100% rename from src/vendor/github.com/docker/notary/trustmanager/errors.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/errors.go diff --git a/src/vendor/github.com/theupdateframework/notary/trustmanager/importLogic.md b/src/vendor/github.com/theupdateframework/notary/trustmanager/importLogic.md new file mode 100644 index 000000000..90da0ebfa --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/trustmanager/importLogic.md @@ -0,0 +1,8 @@ +###This document is intended as an overview of the logic we use for importing keys + +# A flowchart to detail the logic of our import function in `utils/keys.go` (`func ImportKeys`) + +![alt text](http://i.imgur.com/HQICWeO.png "Flowchart of key import logic") + +### Should this logic change, you can edit this image at `https://www.draw.io/i/HQICWeO` + diff --git a/src/vendor/github.com/docker/notary/trustmanager/interfaces.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/interfaces.go similarity index 97% rename from src/vendor/github.com/docker/notary/trustmanager/interfaces.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/interfaces.go index 5cce58983..9925d0ff5 100644 --- a/src/vendor/github.com/docker/notary/trustmanager/interfaces.go +++ b/src/vendor/github.com/theupdateframework/notary/trustmanager/interfaces.go @@ -1,7 +1,7 @@ package trustmanager import ( - "github.com/docker/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/data" ) // Storage implements the bare bones primitives (no hierarchy) diff --git a/src/vendor/github.com/theupdateframework/notary/trustmanager/keys.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/keys.go new file mode 100644 index 000000000..89e82a75a --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/trustmanager/keys.go @@ -0,0 +1,246 @@ +package trustmanager + +import ( + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "sort" + "strings" + + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + tufdata "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" +) + +// Exporter is a simple interface for the two functions we need from the Storage interface +type Exporter interface { + Get(string) ([]byte, error) + ListFiles() []string +} + +// Importer is a simple interface for the one 
function we need from the Storage interface +type Importer interface { + Set(string, []byte) error +} + +// ExportKeysByGUN exports all keys filtered to a GUN +func ExportKeysByGUN(to io.Writer, s Exporter, gun string) error { + keys := s.ListFiles() + sort.Strings(keys) // ensure consistency. ListFiles has no order guarantee + for _, loc := range keys { + keyFile, err := s.Get(loc) + if err != nil { + logrus.Warn("Could not parse key file at ", loc) + continue + } + block, _ := pem.Decode(keyFile) + keyGun := block.Headers["gun"] + if keyGun == gun { // must be full GUN match + if err := ExportKeys(to, s, loc); err != nil { + return err + } + } + } + return nil +} + +// ExportKeysByID exports all keys matching the given ID +func ExportKeysByID(to io.Writer, s Exporter, ids []string) error { + want := make(map[string]struct{}) + for _, id := range ids { + want[id] = struct{}{} + } + keys := s.ListFiles() + for _, k := range keys { + id := filepath.Base(k) + if _, ok := want[id]; ok { + if err := ExportKeys(to, s, k); err != nil { + return err + } + } + } + return nil +} + +// ExportKeys copies a key from the store to the io.Writer +func ExportKeys(to io.Writer, s Exporter, from string) error { + // get PEM block + k, err := s.Get(from) + if err != nil { + return err + } + + // parse PEM blocks if there are more than one + for block, rest := pem.Decode(k); block != nil; block, rest = pem.Decode(rest) { + // add from path in a header for later import + block.Headers["path"] = from + // write serialized PEM + err = pem.Encode(to, block) + if err != nil { + return err + } + } + return nil +} + +// ImportKeys expects an io.Reader containing one or more PEM blocks. +// It reads PEM blocks one at a time until pem.Decode returns a nil +// block. +// Each block is written to the subpath indicated in the "path" PEM +// header. If the file already exists, the file is truncated. Multiple +// adjacent PEMs with the same "path" header are appended together. 
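// A minimal usage sketch, assuming a fixed passphrase and a hypothetical in-memory
// Importer; only ImportKeys, the Importer interface and the notary.PassRetriever
// signature come from this file, everything else below is illustrative.
//
//	type memoryImporter struct{ keys map[string][]byte }
//
//	func (m *memoryImporter) Set(name string, key []byte) error {
//		m.keys[name] = key
//		return nil
//	}
//
//	func importBundle(r io.Reader) error {
//		// Always answer with the same passphrase; a real retriever would prompt the user.
//		pass := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
//			return "testpassword", false, nil
//		}
//		to := &memoryImporter{keys: map[string][]byte{}}
//		// Empty fallback role/GUN: rely on the "role"/"gun" PEM headers carried in the bundle.
//		return ImportKeys(r, []Importer{to}, "", "", pass)
//	}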
+func ImportKeys(from io.Reader, to []Importer, fallbackRole string, fallbackGUN string, passRet notary.PassRetriever) error { + // importLogic.md contains a small flowchart I made to clear up my understand while writing the cases in this function + // it is very rough, but it may help while reading this piece of code + data, err := ioutil.ReadAll(from) + if err != nil { + return err + } + var ( + writeTo string + toWrite []byte + errBlocks []string + ) + for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) { + handleLegacyPath(block) + setFallbacks(block, fallbackGUN, fallbackRole) + + loc, err := checkValidity(block) + if err != nil { + // already logged in checkValidity + errBlocks = append(errBlocks, err.Error()) + continue + } + + // the path header is not of any use once we've imported the key so strip it away + delete(block.Headers, "path") + + // we are now all set for import but let's first encrypt the key + blockBytes := pem.EncodeToMemory(block) + // check if key is encrypted, note: if it is encrypted at this point, it will have had a path header + if privKey, err := utils.ParsePEMPrivateKey(blockBytes, ""); err == nil { + // Key is not encrypted- ask for a passphrase and encrypt this key + var chosenPassphrase string + for attempts := 0; ; attempts++ { + var giveup bool + chosenPassphrase, giveup, err = passRet(loc, block.Headers["role"], true, attempts) + if err == nil { + break + } + if giveup || attempts > 10 { + return errors.New("maximum number of passphrase attempts exceeded") + } + } + blockBytes, err = utils.ConvertPrivateKeyToPKCS8(privKey, tufdata.RoleName(block.Headers["role"]), tufdata.GUN(block.Headers["gun"]), chosenPassphrase) + if err != nil { + return errors.New("failed to encrypt key with given passphrase") + } + } + + if loc != writeTo { + // next location is different from previous one. We've finished aggregating + // data for the previous file. If we have data, write the previous file, + // clear toWrite and set writeTo to the next path we're going to write + if toWrite != nil { + if err = importToStores(to, writeTo, toWrite); err != nil { + return err + } + } + // set up for aggregating next file's data + toWrite = nil + writeTo = loc + } + + toWrite = append(toWrite, blockBytes...) 
+ } + if toWrite != nil { // close out final iteration if there's data left + return importToStores(to, writeTo, toWrite) + } + if len(errBlocks) > 0 { + return fmt.Errorf("failed to import all keys: %s", strings.Join(errBlocks, ", ")) + } + return nil +} + +func handleLegacyPath(block *pem.Block) { + // if there is a legacy path then we set the gun header from this path + // this is the case when a user attempts to import a key bundle generated by an older client + if rawPath := block.Headers["path"]; rawPath != "" && rawPath != filepath.Base(rawPath) { + // this is a legacy filepath and we should try to deduce the gun name from it + pathWOFileName := filepath.Dir(rawPath) + if strings.HasPrefix(pathWOFileName, notary.NonRootKeysSubdir) { + // remove the notary keystore-specific segment of the path, and any potential leading or trailing slashes + gunName := strings.Trim(strings.TrimPrefix(pathWOFileName, notary.NonRootKeysSubdir), "/") + if gunName != "" { + block.Headers["gun"] = gunName + } + } + block.Headers["path"] = filepath.Base(rawPath) + } +} + +func setFallbacks(block *pem.Block, fallbackGUN, fallbackRole string) { + if block.Headers["gun"] == "" { + if fallbackGUN != "" { + block.Headers["gun"] = fallbackGUN + } + } + + if block.Headers["role"] == "" { + if fallbackRole == "" { + block.Headers["role"] = notary.DefaultImportRole + } else { + block.Headers["role"] = fallbackRole + } + } +} + +// checkValidity ensures the fields in the pem headers are valid and parses out the location. +// While importing a collection of keys, errors from this function should result in only the +// current pem block being skipped. +func checkValidity(block *pem.Block) (string, error) { + // A root key or a delegations key should not have a gun + // Note that a key that is not any of the canonical roles (except root) is a delegations key and should not have a gun + switch block.Headers["role"] { + case tufdata.CanonicalSnapshotRole.String(), tufdata.CanonicalTargetsRole.String(), tufdata.CanonicalTimestampRole.String(): + // check if the key is missing a gun header or has an empty gun and error out since we don't know what gun it belongs to + if block.Headers["gun"] == "" { + logrus.Warnf("failed to import key (%s) to store: Cannot have canonical role key without a gun, don't know what gun it belongs to", block.Headers["path"]) + return "", errors.New("invalid key pem block") + } + default: + delete(block.Headers, "gun") + } + + loc, ok := block.Headers["path"] + // only if the path isn't specified do we get into this parsing path logic + if !ok || loc == "" { + // if the path isn't specified, we will try to infer the path rel to trust dir from the role (and then gun) + // parse key for the keyID which we will save it by. 
+ // if the key is encrypted at this point, we will generate an error and continue since we don't know the ID to save it by + + decodedKey, err := utils.ParsePEMPrivateKey(pem.EncodeToMemory(block), "") + if err != nil { + logrus.Warn("failed to import key to store: Invalid key generated, key may be encrypted and does not contain path header") + return "", errors.New("invalid key pem block") + } + loc = decodedKey.ID() + } + return loc, nil +} + +func importToStores(to []Importer, path string, bytes []byte) error { + var err error + for _, i := range to { + if err = i.Set(path, bytes); err != nil { + logrus.Errorf("failed to import key to store: %s", err.Error()) + continue + } + break + } + return err +} diff --git a/src/vendor/github.com/docker/notary/trustmanager/keystore.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/keystore.go similarity index 89% rename from src/vendor/github.com/docker/notary/trustmanager/keystore.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/keystore.go index 2049a3ef7..4383f8ed7 100644 --- a/src/vendor/github.com/docker/notary/trustmanager/keystore.go +++ b/src/vendor/github.com/theupdateframework/notary/trustmanager/keystore.go @@ -1,17 +1,16 @@ package trustmanager import ( - "encoding/pem" "fmt" "path/filepath" "strings" "sync" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - store "github.com/docker/notary/storage" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + store "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) type keyInfoMap map[string]KeyInfo @@ -114,11 +113,7 @@ func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error } } - if chosenPassphrase != "" { - pemPrivKey, err = utils.EncryptPrivateKey(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase) - } else { - pemPrivKey, err = utils.KeyToPEM(privKey, keyInfo.Role, keyInfo.Gun) - } + pemPrivKey, err = utils.ConvertPrivateKeyToPKCS8(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase) if err != nil { return err @@ -204,11 +199,11 @@ func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo { func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) { var keyID string keyID = filepath.Base(filename) - block, _ := pem.Decode(pemBytes) - if block == nil { - return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename) + role, gun, err := utils.ExtractPrivateKeyAttributes(pemBytes) + if err != nil { + return "", KeyInfo{}, err } - return keyID, KeyInfo{Gun: data.GUN(block.Headers["gun"]), Role: data.RoleName(block.Headers["role"])}, nil + return keyID, KeyInfo{Gun: gun, Role: role}, nil } // getKeyRole finds the role for the given keyID. 
It attempts to look @@ -224,10 +219,12 @@ func getKeyRole(s Storage, keyID string) (data.RoleName, error) { if err != nil { return "", err } - block, _ := pem.Decode(d) - if block != nil { - return data.RoleName(block.Headers["role"]), nil + + role, _, err := utils.ExtractPrivateKeyAttributes(d) + if err != nil { + return "", err } + return role, nil } } return "", ErrKeyNotFound{KeyID: keyID} diff --git a/src/vendor/github.com/docker/notary/trustmanager/yubikey/import.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/import.go similarity index 87% rename from src/vendor/github.com/docker/notary/trustmanager/yubikey/import.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/import.go index c8eddf6dc..680ded289 100644 --- a/src/vendor/github.com/docker/notary/trustmanager/yubikey/import.go +++ b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/import.go @@ -5,10 +5,11 @@ package yubikey import ( "encoding/pem" "errors" - "github.com/docker/notary" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) // YubiImport is a wrapper around the YubiStore that allows us to import private diff --git a/src/vendor/github.com/docker/notary/trustmanager/yubikey/non_pkcs11.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/non_pkcs11.go similarity index 100% rename from src/vendor/github.com/docker/notary/trustmanager/yubikey/non_pkcs11.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/non_pkcs11.go diff --git a/src/vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_darwin.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_darwin.go similarity index 100% rename from src/vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_darwin.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_darwin.go diff --git a/src/vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_interface.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_interface.go similarity index 100% rename from src/vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_interface.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_interface.go diff --git a/src/vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_linux.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_linux.go similarity index 58% rename from src/vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_linux.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_linux.go index 9967e89e1..836018f00 100644 --- a/src/vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_linux.go +++ b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_linux.go @@ -4,7 +4,9 @@ package yubikey var possiblePkcs11Libs = []string{ "/usr/lib/libykcs11.so", + "/usr/lib/libykcs11.so.1", // yubico-piv-tool on Fedora installs here "/usr/lib64/libykcs11.so", + "/usr/lib64/libykcs11.so.1", // yubico-piv-tool on Fedora installs here "/usr/lib/x86_64-linux-gnu/libykcs11.so", "/usr/local/lib/libykcs11.so", } diff --git 
a/src/vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/yubikeystore.go similarity index 98% rename from src/vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go rename to src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/yubikeystore.go index ebb10b954..1fd71eea9 100644 --- a/src/vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go +++ b/src/vendor/github.com/theupdateframework/notary/trustmanager/yubikey/yubikeystore.go @@ -16,13 +16,13 @@ import ( "os" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/utils" "github.com/miekg/pkcs11" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" + "github.com/theupdateframework/notary/tuf/utils" ) const ( diff --git a/src/vendor/github.com/docker/notary/trustpinning/ca.crt b/src/vendor/github.com/theupdateframework/notary/trustpinning/ca.crt similarity index 100% rename from src/vendor/github.com/docker/notary/trustpinning/ca.crt rename to src/vendor/github.com/theupdateframework/notary/trustpinning/ca.crt diff --git a/src/vendor/github.com/docker/notary/trustpinning/certs.go b/src/vendor/github.com/theupdateframework/notary/trustpinning/certs.go similarity index 98% rename from src/vendor/github.com/docker/notary/trustpinning/certs.go rename to src/vendor/github.com/theupdateframework/notary/trustpinning/certs.go index 9d4f2f0c9..9be49ee10 100644 --- a/src/vendor/github.com/docker/notary/trustpinning/certs.go +++ b/src/vendor/github.com/theupdateframework/notary/trustpinning/certs.go @@ -6,10 +6,10 @@ import ( "fmt" "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" + "github.com/theupdateframework/notary/tuf/utils" ) const wildcard = "*" diff --git a/src/vendor/github.com/docker/notary/trustpinning/test.crt b/src/vendor/github.com/theupdateframework/notary/trustpinning/test.crt similarity index 100% rename from src/vendor/github.com/docker/notary/trustpinning/test.crt rename to src/vendor/github.com/theupdateframework/notary/trustpinning/test.crt diff --git a/src/vendor/github.com/docker/notary/trustpinning/trustpin.go b/src/vendor/github.com/theupdateframework/notary/trustpinning/trustpin.go similarity index 71% rename from src/vendor/github.com/docker/notary/trustpinning/trustpin.go rename to src/vendor/github.com/theupdateframework/notary/trustpinning/trustpin.go index c102051f3..67908f8b2 100644 --- a/src/vendor/github.com/docker/notary/trustpinning/trustpin.go +++ b/src/vendor/github.com/theupdateframework/notary/trustpinning/trustpin.go @@ -5,16 +5,31 @@ import ( "fmt" "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) // TrustPinConfig represents the configuration under the trust_pinning section of the config file // This struct 
represents the preferred way to bootstrap trust for this repository +// This is fully optional. If left at the default, uninitialized value Notary will use TOFU over +// HTTPS. +// You can use this to provide certificates or a CA to pin to as a root of trust for a GUN. +// These are used with the following precedence: +// +// 1. Certs +// 2. CA +// 3. TOFUS (TOFU over HTTPS) +// +// Only one trust pinning option will be used to validate a particular GUN. type TrustPinConfig struct { - CA map[string]string - Certs map[string][]string + // CA maps a GUN prefix to file paths containing the root CA. + // This file can contain multiple root certificates, bundled in separate PEM blocks. + CA map[string]string + // Certs maps a GUN to a list of certificate IDs + Certs map[string][]string + // DisableTOFU, when true, disables "Trust On First Use" of new key data + // This is false by default, which means new key data will always be trusted the first time it is seen. DisableTOFU bool } @@ -37,6 +52,11 @@ func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun data.GUN, firstBootst t.pinnedCertIDs = pinnedCerts return t.certsCheck, nil } + var ok bool + t.pinnedCertIDs, ok = wildcardMatch(gun, trustPinConfig.Certs) + if ok { + return t.certsCheck, nil + } if caFilepath, err := getPinnedCAFilepathByPrefix(gun, trustPinConfig); err == nil { logrus.Debugf("trust-pinning using root CA bundle at: %s", caFilepath) @@ -121,3 +141,23 @@ func getPinnedCAFilepathByPrefix(gun data.GUN, t TrustPinConfig) (string, error) } return specificCAFilepath, nil } + +// wildcardMatch will attempt to match the most specific (longest prefix) wildcarded +// trustpinning option for key IDs. Given the simple globbing and the use of maps, +// it is impossible to have two different prefixes of equal length. +// This logic also solves the issue of Go's randomization of map iteration. 
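// A minimal sketch of the longest-prefix behaviour, using hypothetical GUNs and key IDs:
// the GUN below matches both wildcard entries, and the longer prefix wins.
//
//	func pinnedIDs() []string {
//		certs := map[string][]string{
//			"registry.io/*":         {"abc123"},
//			"registry.io/library/*": {"def456"},
//		}
//		ids, _ := wildcardMatch("registry.io/library/alpine", certs)
//		return ids // ["def456"]: the longer "registry.io/library/*" prefix is selected
//	}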
+func wildcardMatch(gun data.GUN, certs map[string][]string) ([]string, bool) { + var ( + longest = "" + ids []string + ) + for gunPrefix, keyIDs := range certs { + if strings.HasSuffix(gunPrefix, "*") { + if strings.HasPrefix(gun.String(), gunPrefix[:len(gunPrefix)-1]) && len(gunPrefix) > len(longest) { + longest = gunPrefix + ids = keyIDs + } + } + } + return ids, ids != nil +} diff --git a/src/vendor/github.com/docker/notary/tuf/LICENSE b/src/vendor/github.com/theupdateframework/notary/tuf/LICENSE similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/LICENSE rename to src/vendor/github.com/theupdateframework/notary/tuf/LICENSE diff --git a/src/vendor/github.com/docker/notary/tuf/README.md b/src/vendor/github.com/theupdateframework/notary/tuf/README.md similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/README.md rename to src/vendor/github.com/theupdateframework/notary/tuf/README.md diff --git a/src/vendor/github.com/docker/notary/tuf/builder.go b/src/vendor/github.com/theupdateframework/notary/tuf/builder.go similarity index 99% rename from src/vendor/github.com/docker/notary/tuf/builder.go rename to src/vendor/github.com/theupdateframework/notary/tuf/builder.go index b86874377..db0a4d111 100644 --- a/src/vendor/github.com/docker/notary/tuf/builder.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/builder.go @@ -4,12 +4,12 @@ import ( "fmt" "github.com/docker/go/canonical/json" - "github.com/docker/notary" + "github.com/theupdateframework/notary" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/utils" + "github.com/theupdateframework/notary/trustpinning" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" + "github.com/theupdateframework/notary/tuf/utils" ) // ErrBuildDone is returned when any functions are called on RepoBuilder, and it diff --git a/src/vendor/github.com/docker/notary/tuf/data/errors.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/errors.go similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/data/errors.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/errors.go diff --git a/src/vendor/github.com/docker/notary/tuf/data/keys.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/keys.go similarity index 99% rename from src/vendor/github.com/docker/notary/tuf/data/keys.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/keys.go index 8abbf9ac3..c19ea8fa1 100644 --- a/src/vendor/github.com/docker/notary/tuf/data/keys.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/data/keys.go @@ -12,9 +12,9 @@ import ( "io" "math/big" - "github.com/Sirupsen/logrus" "github.com/agl/ed25519" "github.com/docker/go/canonical/json" + "github.com/sirupsen/logrus" ) // PublicKey is the necessary interface for public keys diff --git a/src/vendor/github.com/docker/notary/tuf/data/roles.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/roles.go similarity index 99% rename from src/vendor/github.com/docker/notary/tuf/data/roles.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/roles.go index cb8f923a4..1a6541ca3 100644 --- a/src/vendor/github.com/docker/notary/tuf/data/roles.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/data/roles.go @@ -6,7 +6,7 @@ import ( "regexp" "strings" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // Canonical base 
role names diff --git a/src/vendor/github.com/docker/notary/tuf/data/root.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/root.go similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/data/root.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/root.go diff --git a/src/vendor/github.com/docker/notary/tuf/data/serializer.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/serializer.go similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/data/serializer.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/serializer.go diff --git a/src/vendor/github.com/docker/notary/tuf/data/snapshot.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/snapshot.go similarity index 98% rename from src/vendor/github.com/docker/notary/tuf/data/snapshot.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/snapshot.go index 028609f06..2a07105ba 100644 --- a/src/vendor/github.com/docker/notary/tuf/data/snapshot.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/data/snapshot.go @@ -4,9 +4,9 @@ import ( "bytes" "fmt" - "github.com/Sirupsen/logrus" "github.com/docker/go/canonical/json" - "github.com/docker/notary" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" ) // SignedSnapshot is a fully unpacked snapshot.json diff --git a/src/vendor/github.com/docker/notary/tuf/data/targets.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/targets.go similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/data/targets.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/targets.go diff --git a/src/vendor/github.com/docker/notary/tuf/data/timestamp.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/timestamp.go similarity index 98% rename from src/vendor/github.com/docker/notary/tuf/data/timestamp.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/timestamp.go index 883641cd7..baf4016ee 100644 --- a/src/vendor/github.com/docker/notary/tuf/data/timestamp.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/data/timestamp.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/docker/go/canonical/json" - "github.com/docker/notary" + "github.com/theupdateframework/notary" ) // SignedTimestamp is a fully unpacked timestamp.json diff --git a/src/vendor/github.com/docker/notary/tuf/data/types.go b/src/vendor/github.com/theupdateframework/notary/tuf/data/types.go similarity index 97% rename from src/vendor/github.com/docker/notary/tuf/data/types.go rename to src/vendor/github.com/theupdateframework/notary/tuf/data/types.go index 778e0f1a0..6f9c11201 100644 --- a/src/vendor/github.com/docker/notary/tuf/data/types.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/data/types.go @@ -14,12 +14,14 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/go/canonical/json" - "github.com/docker/notary" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" ) -// GUN type for specifying gun +// GUN is a Globally Unique Name. It is used to identify trust collections. +// An example usage of this is for container image repositories. 
+// For example: myregistry.io/myuser/myimage type GUN string func (g GUN) String() string { diff --git a/src/vendor/github.com/docker/notary/tuf/signed/ed25519.go b/src/vendor/github.com/theupdateframework/notary/tuf/signed/ed25519.go similarity index 94% rename from src/vendor/github.com/docker/notary/tuf/signed/ed25519.go rename to src/vendor/github.com/theupdateframework/notary/tuf/signed/ed25519.go index e08daba35..b526085a4 100644 --- a/src/vendor/github.com/docker/notary/tuf/signed/ed25519.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/signed/ed25519.go @@ -4,9 +4,9 @@ import ( "crypto/rand" "errors" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) type edCryptoKey struct { diff --git a/src/vendor/github.com/docker/notary/tuf/signed/errors.go b/src/vendor/github.com/theupdateframework/notary/tuf/signed/errors.go similarity index 98% rename from src/vendor/github.com/docker/notary/tuf/signed/errors.go rename to src/vendor/github.com/theupdateframework/notary/tuf/signed/errors.go index 5d4ff04ab..29ec40de2 100644 --- a/src/vendor/github.com/docker/notary/tuf/signed/errors.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/signed/errors.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/docker/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/data" ) // ErrInsufficientSignatures - can not create enough signatures on a piece of diff --git a/src/vendor/github.com/docker/notary/tuf/signed/interface.go b/src/vendor/github.com/theupdateframework/notary/tuf/signed/interface.go similarity index 96% rename from src/vendor/github.com/docker/notary/tuf/signed/interface.go rename to src/vendor/github.com/theupdateframework/notary/tuf/signed/interface.go index 03b426f16..14f3a33fa 100644 --- a/src/vendor/github.com/docker/notary/tuf/signed/interface.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/signed/interface.go @@ -1,6 +1,6 @@ package signed -import "github.com/docker/notary/tuf/data" +import "github.com/theupdateframework/notary/tuf/data" // KeyService provides management of keys locally. It will never // accept or provide private keys. 
Communication between the KeyService diff --git a/src/vendor/github.com/docker/notary/tuf/signed/sign.go b/src/vendor/github.com/theupdateframework/notary/tuf/signed/sign.go similarity index 95% rename from src/vendor/github.com/docker/notary/tuf/signed/sign.go rename to src/vendor/github.com/theupdateframework/notary/tuf/signed/sign.go index 31abb1204..b3e329ce4 100644 --- a/src/vendor/github.com/docker/notary/tuf/signed/sign.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/signed/sign.go @@ -14,10 +14,10 @@ package signed import ( "crypto/rand" - "github.com/Sirupsen/logrus" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) // Sign takes a data.Signed and a cryptoservice containing private keys, diff --git a/src/vendor/github.com/docker/notary/tuf/signed/verifiers.go b/src/vendor/github.com/theupdateframework/notary/tuf/signed/verifiers.go similarity index 98% rename from src/vendor/github.com/docker/notary/tuf/signed/verifiers.go rename to src/vendor/github.com/theupdateframework/notary/tuf/signed/verifiers.go index ba518ce05..d5ce7f862 100644 --- a/src/vendor/github.com/docker/notary/tuf/signed/verifiers.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/signed/verifiers.go @@ -10,9 +10,9 @@ import ( "fmt" "math/big" - "github.com/Sirupsen/logrus" "github.com/agl/ed25519" - "github.com/docker/notary/tuf/data" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/tuf/data" ) const ( diff --git a/src/vendor/github.com/docker/notary/tuf/signed/verify.go b/src/vendor/github.com/theupdateframework/notary/tuf/signed/verify.go similarity index 82% rename from src/vendor/github.com/docker/notary/tuf/signed/verify.go rename to src/vendor/github.com/theupdateframework/notary/tuf/signed/verify.go index a72a63344..5ae2da485 100644 --- a/src/vendor/github.com/docker/notary/tuf/signed/verify.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/signed/verify.go @@ -6,9 +6,10 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/go/canonical/json" - "github.com/docker/notary/tuf/data" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" ) // Various basic signing errors @@ -107,3 +108,16 @@ func VerifySignature(msg []byte, sig *data.Signature, pk data.PublicKey) error { sig.IsValid = true return nil } + +// VerifyPublicKeyMatchesPrivateKey checks if the private key and the public keys forms valid key pairs. 
+// Supports both x509 certificate PublicKeys and non-certificate PublicKeys +func VerifyPublicKeyMatchesPrivateKey(privKey data.PrivateKey, pubKey data.PublicKey) error { + pubKeyID, err := utils.CanonicalKeyID(pubKey) + if err != nil { + return fmt.Errorf("could not verify key pair: %v", err) + } + if privKey == nil || pubKeyID != privKey.ID() { + return fmt.Errorf("private key is nil or does not match public key") + } + return nil +} diff --git a/src/vendor/github.com/docker/notary/tuf/tuf.go b/src/vendor/github.com/theupdateframework/notary/tuf/tuf.go similarity index 98% rename from src/vendor/github.com/docker/notary/tuf/tuf.go rename to src/vendor/github.com/theupdateframework/notary/tuf/tuf.go index 1843849da..866403bba 100644 --- a/src/vendor/github.com/docker/notary/tuf/tuf.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/tuf.go @@ -8,11 +8,11 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/notary" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" + "github.com/theupdateframework/notary/tuf/utils" ) // ErrSigVerifyFail - signature verification failed @@ -248,17 +248,14 @@ func (tr *Repo) GetDelegationRole(name data.RoleName) (data.DelegationRole, erro } // Check all public key certificates in the role for expiry // Currently we do not reject expired delegation keys but warn if they might expire soon or have already - for keyID, pubKey := range delgRole.Keys { + for _, pubKey := range delgRole.Keys { certFromKey, err := utils.LoadCertFromPEM(pubKey.Public()) if err != nil { continue } - if err := utils.ValidateCertificate(certFromKey, true); err != nil { - if _, ok := err.(data.ErrCertExpired); !ok { - // do not allow other invalid cert errors - return err - } - logrus.Warnf("error with delegation %s key ID %d: %s", delgRole.Name, keyID, err) + //Don't check the delegation certificate expiry once added, use the TUF role expiry instead + if err := utils.ValidateCertificate(certFromKey, false); err != nil { + return err } } foundRole = &delgRole diff --git a/src/vendor/github.com/theupdateframework/notary/tuf/utils/pkcs8.go b/src/vendor/github.com/theupdateframework/notary/tuf/utils/pkcs8.go new file mode 100644 index 000000000..edcaa77ff --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/tuf/utils/pkcs8.go @@ -0,0 +1,341 @@ +// Package utils contains tuf related utility functions however this file is hard +// forked from https://github.com/youmark/pkcs8 package. It has been further modified +// based on the requirements of Notary. For converting keys into PKCS#8 format, +// original package expected *crypto.PrivateKey interface, which then type inferred +// to either *rsa.PrivateKey or *ecdsa.PrivateKey depending on the need and later +// converted to ASN.1 DER encoded form, this whole process was superfluous here as +// keys are already being kept in ASN.1 DER format wrapped in data.PrivateKey +// structure. With these changes, package has became tightly coupled with notary as +// most of the method signatures have been updated. Moreover support for ED25519 +// keys has been added as well. 
License for original package is following: +// +// The MIT License (MIT) +// +// Copyright (c) 2014 youmark +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +package utils + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + + "golang.org/x/crypto/pbkdf2" + + "github.com/theupdateframework/notary/tuf/data" +) + +// Copy from crypto/x509 +var ( + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + // crypto/x509 doesn't have support for ED25519 + // http://www.oid-info.com/get/1.3.6.1.4.1.11591.15.1 + oidPublicKeyED25519 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 15, 1} +) + +// Copy from crypto/x509 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +// Copy from crypto/x509 +func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return oidNamedCurveP224, true + case elliptic.P256(): + return oidNamedCurveP256, true + case elliptic.P384(): + return oidNamedCurveP384, true + case elliptic.P521(): + return oidNamedCurveP521, true + } + + return nil, false +} + +// Unecrypted PKCS8 +var ( + oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} + oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} + oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} +) + +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +type privateKeyInfo struct { + Version int + PrivateKeyAlgorithm []asn1.ObjectIdentifier + PrivateKey []byte +} + +// Encrypted PKCS8 +type pbkdf2Params struct { + Salt []byte + IterationCount int +} + +type pbkdf2Algorithms struct { + IDPBKDF2 asn1.ObjectIdentifier + PBKDF2Params pbkdf2Params +} + +type pbkdf2Encs struct { + EncryAlgo asn1.ObjectIdentifier + IV []byte +} + +type pbes2Params struct { + KeyDerivationFunc pbkdf2Algorithms + EncryptionScheme pbkdf2Encs +} + +type pbes2Algorithms struct { 
+ IDPBES2 asn1.ObjectIdentifier + PBES2Params pbes2Params +} + +type encryptedPrivateKeyInfo struct { + EncryptionAlgorithm pbes2Algorithms + EncryptedData []byte +} + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. +// copied from https://github.com/golang/go/blob/964639cc338db650ccadeafb7424bc8ebb2c0f6c/src/crypto/x509/pkcs8.go#L17 +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte +} + +func parsePKCS8ToTufKey(der []byte) (data.PrivateKey, error) { + var key pkcs8 + + if _, err := asn1.Unmarshal(der, &key); err != nil { + if _, ok := err.(asn1.StructuralError); ok { + return nil, errors.New("could not decrypt private key") + } + return nil, err + } + + if key.Algo.Algorithm.Equal(oidPublicKeyED25519) { + tufED25519PrivateKey, err := ED25519ToPrivateKey(key.PrivateKey) + if err != nil { + return nil, fmt.Errorf("could not convert ed25519.PrivateKey to data.PrivateKey: %v", err) + } + + return tufED25519PrivateKey, nil + } + + privKey, err := x509.ParsePKCS8PrivateKey(der) + if err != nil { + return nil, err + } + + switch priv := privKey.(type) { + case *rsa.PrivateKey: + tufRSAPrivateKey, err := RSAToPrivateKey(priv) + if err != nil { + return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err) + } + + return tufRSAPrivateKey, nil + case *ecdsa.PrivateKey: + tufECDSAPrivateKey, err := ECDSAToPrivateKey(priv) + if err != nil { + return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err) + } + + return tufECDSAPrivateKey, nil + } + + return nil, errors.New("unsupported key type") +} + +// ParsePKCS8ToTufKey requires PKCS#8 key in DER format and returns data.PrivateKey +// Password should be provided in case of Encrypted PKCS#8 key, else it should be nil. +func ParsePKCS8ToTufKey(der []byte, password []byte) (data.PrivateKey, error) { + if password == nil { + return parsePKCS8ToTufKey(der) + } + + var privKey encryptedPrivateKeyInfo + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") + } + + if !privKey.EncryptionAlgorithm.IDPBES2.Equal(oidPBES2) { + return nil, errors.New("pkcs8: only PBES2 supported") + } + + if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IDPBKDF2.Equal(oidPKCS5PBKDF2) { + return nil, errors.New("pkcs8: only PBKDF2 supported") + } + + encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme + kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params + + switch { + case encParam.EncryAlgo.Equal(oidAES256CBC): + iv := encParam.IV + salt := kdfParam.Salt + iter := kdfParam.IterationCount + + encryptedKey := privKey.EncryptedData + symkey := pbkdf2.Key(password, salt, iter, 32, sha1.New) + block, err := aes.NewCipher(symkey) + if err != nil { + return nil, err + } + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(encryptedKey, encryptedKey) + + // no need to explicitly remove padding, as ASN.1 unmarshalling will automatically discard it + key, err := parsePKCS8ToTufKey(encryptedKey) + if err != nil { + return nil, errors.New("pkcs8: incorrect password") + } + + return key, nil + default: + return nil, errors.New("pkcs8: only AES-256-CBC supported") + } + +} + +func convertTUFKeyToPKCS8(priv data.PrivateKey) ([]byte, error) { + var pkey privateKeyInfo + + switch priv.Algorithm() { + case data.RSAKey, data.RSAx509Key: + // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). 
+ // But openssl set to v1 even publicKey is present + pkey.Version = 0 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA + pkey.PrivateKey = priv.Private() + case data.ECDSAKey, data.ECDSAx509Key: + // To extract Curve value, parsing ECDSA key to *ecdsa.PrivateKey + eckey, err := x509.ParseECPrivateKey(priv.Private()) + if err != nil { + return nil, err + } + + oidNamedCurve, ok := oidFromNamedCurve(eckey.Curve) + if !ok { + return nil, errors.New("pkcs8: unknown elliptic curve") + } + + // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). + // But openssl set to v1 even publicKey is present + pkey.Version = 1 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA + pkey.PrivateKeyAlgorithm[1] = oidNamedCurve + pkey.PrivateKey = priv.Private() + case data.ED25519Key: + pkey.Version = 0 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyED25519 + pkey.PrivateKey = priv.Private() + default: + return nil, fmt.Errorf("algorithm %s not supported", priv.Algorithm()) + } + + return asn1.Marshal(pkey) +} + +func convertTUFKeyToPKCS8Encrypted(priv data.PrivateKey, password []byte) ([]byte, error) { + // Convert private key into PKCS8 format + pkey, err := convertTUFKeyToPKCS8(priv) + if err != nil { + return nil, err + } + + // Calculate key from password based on PKCS5 algorithm + // Use 8 byte salt, 16 byte IV, and 2048 iteration + iter := 2048 + salt := make([]byte, 8) + iv := make([]byte, 16) + _, err = rand.Reader.Read(salt) + if err != nil { + return nil, err + } + + _, err = rand.Reader.Read(iv) + if err != nil { + return nil, err + } + + key := pbkdf2.Key(password, salt, iter, 32, sha1.New) + + // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme + padding := aes.BlockSize - len(pkey)%aes.BlockSize + if padding > 0 { + n := len(pkey) + pkey = append(pkey, make([]byte, padding)...) + for i := 0; i < padding; i++ { + pkey[n+i] = byte(padding) + } + } + + encryptedKey := make([]byte, len(pkey)) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(encryptedKey, pkey) + + pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter}} + pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} + pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} + + encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} + return asn1.Marshal(encryptedPkey) +} + +// ConvertTUFKeyToPKCS8 converts a private key (data.Private) to PKCS#8 and returns in DER format +// if password is not nil, it would convert the Private Key to Encrypted PKCS#8. 
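// A minimal round-trip sketch, assuming a throwaway passphrase: a key serialized by
// ConvertTUFKeyToPKCS8 should be recoverable with ParsePKCS8ToTufKey using the same
// password. GenerateECDSAKey and rand.Reader are already used in this package; the
// roundTripPKCS8 helper and the passphrase value are illustrative only.
//
//	func roundTripPKCS8() error {
//		priv, err := GenerateECDSAKey(rand.Reader)
//		if err != nil {
//			return err
//		}
//		der, err := ConvertTUFKeyToPKCS8(priv, []byte("testpassword"))
//		if err != nil {
//			return err
//		}
//		restored, err := ParsePKCS8ToTufKey(der, []byte("testpassword"))
//		if err != nil {
//			return err
//		}
//		_ = restored // restored.ID() should match priv.ID()
//		return nil
//	}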
+func ConvertTUFKeyToPKCS8(priv data.PrivateKey, password []byte) ([]byte, error) { + if password == nil { + return convertTUFKeyToPKCS8(priv) + } + return convertTUFKeyToPKCS8Encrypted(priv, password) +} diff --git a/src/vendor/github.com/docker/notary/tuf/utils/role_sort.go b/src/vendor/github.com/theupdateframework/notary/tuf/utils/role_sort.go similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/utils/role_sort.go rename to src/vendor/github.com/theupdateframework/notary/tuf/utils/role_sort.go diff --git a/src/vendor/github.com/docker/notary/tuf/utils/stack.go b/src/vendor/github.com/theupdateframework/notary/tuf/utils/stack.go similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/utils/stack.go rename to src/vendor/github.com/theupdateframework/notary/tuf/utils/stack.go diff --git a/src/vendor/github.com/docker/notary/tuf/utils/utils.go b/src/vendor/github.com/theupdateframework/notary/tuf/utils/utils.go similarity index 98% rename from src/vendor/github.com/docker/notary/tuf/utils/utils.go rename to src/vendor/github.com/theupdateframework/notary/tuf/utils/utils.go index 2899a0340..ada7dc8cc 100644 --- a/src/vendor/github.com/docker/notary/tuf/utils/utils.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/utils/utils.go @@ -7,7 +7,7 @@ import ( "fmt" "io" - "github.com/docker/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/data" ) // StrSliceContains checks if the given string appears in the slice diff --git a/src/vendor/github.com/docker/notary/tuf/utils/x509.go b/src/vendor/github.com/theupdateframework/notary/tuf/utils/x509.go similarity index 82% rename from src/vendor/github.com/docker/notary/tuf/utils/x509.go rename to src/vendor/github.com/theupdateframework/notary/tuf/utils/x509.go index 7cf8fff88..4ebf83c2e 100644 --- a/src/vendor/github.com/docker/notary/tuf/utils/x509.go +++ b/src/vendor/github.com/theupdateframework/notary/tuf/utils/x509.go @@ -16,16 +16,19 @@ import ( "math/big" "time" - "github.com/Sirupsen/logrus" "github.com/agl/ed25519" - "github.com/docker/notary" - "github.com/docker/notary/tuf/data" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/tuf/data" ) // CanonicalKeyID returns the ID of the public bytes version of a TUF key. // On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA // TUF keys, this is the key ID of the public key part of the key in the leaf cert func CanonicalKeyID(k data.PublicKey) (string, error) { + if k == nil { + return "", errors.New("public key is nil") + } switch k.Algorithm() { case data.ECDSAx509Key, data.RSAx509Key: return X509PublicKeyID(k) @@ -82,14 +85,7 @@ func X509PublicKeyID(certPubKey data.PublicKey) (string, error) { return key.ID(), nil } -// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It -// only supports RSA (PKCS#1) and attempts to decrypt using the passphrase, if encrypted. -func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("no valid private key found") - } - +func parseLegacyPrivateKey(block *pem.Block, passphrase string) (data.PrivateKey, error) { var privKeyBytes []byte var err error if x509.IsEncryptedPEMBlock(block) { @@ -142,6 +138,35 @@ func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, er } } +// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. 
It +// supports PKCS#8 as well as RSA/ECDSA (PKCS#1) only in non-FIPS mode and +// attempts to decrypt using the passphrase, if encrypted. +func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) { + return parsePEMPrivateKey(pemBytes, passphrase, notary.FIPSEnabled()) +} + +func parsePEMPrivateKey(pemBytes []byte, passphrase string, fips bool) (data.PrivateKey, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("no valid private key found") + } + + switch block.Type { + case "RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY": + if fips { + return nil, fmt.Errorf("%s not supported in FIPS mode", block.Type) + } + return parseLegacyPrivateKey(block, passphrase) + case "ENCRYPTED PRIVATE KEY", "PRIVATE KEY": + if passphrase == "" { + return ParsePKCS8ToTufKey(block.Bytes, nil) + } + return ParsePKCS8ToTufKey(block.Bytes, []byte(passphrase)) + default: + return nil, fmt.Errorf("unsupported key type %q", block.Type) + } +} + // CertToPEM is a utility function returns a PEM encoded x509 Certificate func CertToPEM(cert *x509.Certificate) []byte { pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) @@ -313,21 +338,16 @@ func ValidateCertificate(c *x509.Certificate, checkExpiry bool) error { return nil } -// GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey -func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) { - rsaPrivKey, err := rsa.GenerateKey(random, bits) - if err != nil { - return nil, fmt.Errorf("could not generate private key: %v", err) +// GenerateKey returns a new private key using the provided algorithm or an +// error detailing why the key could not be generated +func GenerateKey(algorithm string) (data.PrivateKey, error) { + switch algorithm { + case data.ECDSAKey: + return GenerateECDSAKey(rand.Reader) + case data.ED25519Key: + return GenerateED25519Key(rand.Reader) } - - tufPrivKey, err := RSAToPrivateKey(rsaPrivKey) - if err != nil { - return nil, err - } - - logrus.Debugf("generated RSA key with keyID: %s", tufPrivKey.ID()) - - return tufPrivKey, nil + return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm) } // RSAToPrivateKey converts an rsa.Private key to a TUF data.PrivateKey type @@ -414,75 +434,58 @@ func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) { return data.NewED25519PrivateKey(*pubKey, privKeyBytes) } -func blockType(k data.PrivateKey) (string, error) { - switch k.Algorithm() { - case data.RSAKey, data.RSAx509Key: - return "RSA PRIVATE KEY", nil - case data.ECDSAKey, data.ECDSAx509Key: - return "EC PRIVATE KEY", nil - case data.ED25519Key: - return "ED25519 PRIVATE KEY", nil - default: - return "", fmt.Errorf("algorithm %s not supported", k.Algorithm()) - } +// ExtractPrivateKeyAttributes extracts role and gun values from private key bytes +func ExtractPrivateKeyAttributes(pemBytes []byte) (data.RoleName, data.GUN, error) { + return extractPrivateKeyAttributes(pemBytes, notary.FIPSEnabled()) } -// KeyToPEM returns a PEM encoded key from a Private Key -func KeyToPEM(privKey data.PrivateKey, role data.RoleName, gun data.GUN) ([]byte, error) { - bt, err := blockType(privKey) - if err != nil { - return nil, err +func extractPrivateKeyAttributes(pemBytes []byte, fips bool) (data.RoleName, data.GUN, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return "", "", errors.New("PEM block is empty") } - headers := map[string]string{} + switch block.Type { + case 
"RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY": + if fips { + return "", "", fmt.Errorf("%s not supported in FIPS mode", block.Type) + } + case "PRIVATE KEY", "ENCRYPTED PRIVATE KEY": + // do nothing for PKCS#8 keys + default: + return "", "", errors.New("unknown key format") + } + return data.RoleName(block.Headers["role"]), data.GUN(block.Headers["gun"]), nil +} + +// ConvertPrivateKeyToPKCS8 converts a data.PrivateKey to PKCS#8 Format +func ConvertPrivateKeyToPKCS8(key data.PrivateKey, role data.RoleName, gun data.GUN, passphrase string) ([]byte, error) { + var ( + err error + der []byte + blockType = "PRIVATE KEY" + ) + + if passphrase == "" { + der, err = ConvertTUFKeyToPKCS8(key, nil) + } else { + blockType = "ENCRYPTED PRIVATE KEY" + der, err = ConvertTUFKeyToPKCS8(key, []byte(passphrase)) + } + if err != nil { + return nil, fmt.Errorf("unable to convert to PKCS8 key") + } + + headers := make(map[string]string) if role != "" { headers["role"] = role.String() } + if gun != "" { headers["gun"] = gun.String() } - block := &pem.Block{ - Type: bt, - Headers: headers, - Bytes: privKey.Private(), - } - - return pem.EncodeToMemory(block), nil -} - -// EncryptPrivateKey returns an encrypted PEM key given a Privatekey -// and a passphrase -func EncryptPrivateKey(key data.PrivateKey, role data.RoleName, gun data.GUN, passphrase string) ([]byte, error) { - bt, err := blockType(key) - if err != nil { - return nil, err - } - - password := []byte(passphrase) - cipherType := x509.PEMCipherAES256 - - encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader, - bt, - key.Private(), - password, - cipherType) - if err != nil { - return nil, err - } - - if encryptedPEMBlock.Headers == nil { - return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced") - } - - if role != "" { - encryptedPEMBlock.Headers["role"] = role.String() - } - if gun != "" { - encryptedPEMBlock.Headers["gun"] = gun.String() - } - - return pem.EncodeToMemory(encryptedPEMBlock), nil + return pem.EncodeToMemory(&pem.Block{Bytes: der, Type: blockType, Headers: headers}), nil } // CertToKey transforms a single input certificate into its corresponding @@ -537,8 +540,8 @@ func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) ( return newKey, nil } -// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval. -func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) { +// NewCertificate returns an X509 Certificate following a template, given a Common Name and validity interval. 
+func NewCertificate(commonName string, startTime, endTime time.Time) (*x509.Certificate, error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) @@ -549,7 +552,7 @@ func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate return &x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{ - CommonName: gun, + CommonName: commonName, }, NotBefore: startTime, NotAfter: endTime, diff --git a/src/vendor/github.com/docker/notary/tuf/validation/errors.go b/src/vendor/github.com/theupdateframework/notary/tuf/validation/errors.go similarity index 100% rename from src/vendor/github.com/docker/notary/tuf/validation/errors.go rename to src/vendor/github.com/theupdateframework/notary/tuf/validation/errors.go diff --git a/src/vendor/github.com/theupdateframework/notary/vendor.conf b/src/vendor/github.com/theupdateframework/notary/vendor.conf new file mode 100644 index 000000000..94db21070 --- /dev/null +++ b/src/vendor/github.com/theupdateframework/notary/vendor.conf @@ -0,0 +1,59 @@ +github.com/Shopify/logrus-bugsnag 6dbc35f2c30d1e37549f9673dd07912452ab28a5 +github.com/sirupsen/logrus f006c2ac4710855cf0f916dd6b77acf6b048dc6e # v1.0.3 +github.com/agl/ed25519 278e1ec8e8a6e017cd07577924d6766039146ced +github.com/bugsnag/bugsnag-go 13fd6b8acda029830ef9904df6b63be0a83369d0 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 +github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 +github.com/dvsekhvalnov/jose2go f21a8cedbbae609f623613ec8f81125c243212e6 # v1.3 +github.com/go-sql-driver/mysql a0583e0143b1624142adab07e0e97fe106d99561 # v1.3 +github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996 # v1.6.1 +github.com/jinzhu/gorm 5409931a1bb87e484d68d649af9367c207713ea2 +github.com/jinzhu/inflection 1c35d901db3da928c72a72d8458480cc9ade058f +github.com/lib/pq 0dad96c0b94f8dee039aa40467f767467392a0af +github.com/mattn/go-sqlite3 6c771bb9887719704b210e87e934f08be014bdb1 # v1.6.0 +github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb +github.com/prometheus/client_golang 449ccefff16c8e2b7229f6be1921ba22f62461fe +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 # model-0.0.2-12-gfa8ad6f +github.com/prometheus/procfs b1afdc266f54247f5dc725544f5d351a8661f502 +github.com/prometheus/common 4fdc91a58c9d3696b982e8a680f4997403132d44 +github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527 +github.com/spf13/cobra 7b2c5ac9fc04fc5efafb60700713d4fa609b777b # v0.0.1 +github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e +golang.org/x/crypto 76eec36fa14229c4b25bb894c2d0e591527af429 +golang.org/x/net 6a513affb38dc9788b449d59ffed099b8de18fa0 +golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 +google.golang.org/grpc 708a7f9f3283aa2d4f6132d287d78683babe55c8 # v1.0.5 +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 + +github.com/spf13/pflag e57e3eeb33f795204c1ca35f56c44f83227c6e66 # v1.0.0 +github.com/spf13/cast 4d07383ffe94b5e5a6fa3af9211374a4507a0184 +gopkg.in/yaml.v2 5420a8b6744d3b0345ab293f6fcba19c978f1183 # v2.2.1 +gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 +github.com/gorilla/context 
14f550f51af52180c2eefed15e5fd18d63c0a64a # unused +github.com/spf13/jwalterweatherman 3d60171a64319ef63c78bd45bd60e6eab1e75f8b +github.com/mitchellh/mapstructure 2caf8efc93669b6c43e0441cdc6aed17546c96f3 +github.com/magiconair/properties 624009598839a9432bd97bb75552389422357723 # v1.5.3 +github.com/kr/text 6807e777504f54ad073ecef66747de158294b639 +github.com/kr/pretty bc9499caa0f45ee5edb2f0209fbd61fbf3d9018f # go.weekly.2011-12-22-18-gbc9499c +github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 +github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20 +github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d +github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 + +github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 + +gopkg.in/dancannon/gorethink.v3 e324d6ad938205da6c1e8a0179dc97a5b1a92185 https://github.com/docker/gorethink # v3.0.0-logrus +# dependencies of gorethink.v3 +gopkg.in/gorethink/gorethink.v2 ac5be4ae8538d44ae8843b97fc9f90860cb48a85 https://github.com/docker/gorethink # v2.2.2-logrus +github.com/cenk/backoff 32cd0c5b3aef12c76ed64aaf678f6c79736be7dc # v1.0.0 + +# Testing requirements +github.com/stretchr/testify 089c7181b8c728499929ff09b62d3fdd8df8adff +github.com/cloudflare/cfssl 4e2dcbde500472449917533851bf4bae9bdff562 # v1.3.1 +github.com/google/certificate-transparency-go 5ab67e519c93568ac3ee50fd6772a5bcf8aa460d +github.com/gogo/protobuf 1adfc126b41513cc696b209667c8656ea7aac67c # v1.0.0
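
For reviewers, here is a minimal usage sketch of the PKCS#8 helpers this upgrade brings in under github.com/theupdateframework/notary/tuf/utils (GenerateKey, ConvertPrivateKeyToPKCS8, ExtractPrivateKeyAttributes, ParsePEMPrivateKey), based only on the signatures visible in the hunks above. The role name "targets", the GUN "example.com/library/app", and the passphrase "opensesame" are illustrative placeholders, not values taken from this patch.

package main

import (
	"fmt"
	"log"

	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

func main() {
	// GenerateKey accepts only ECDSA and Ed25519 here; other algorithms return an error.
	key, err := utils.GenerateKey(data.ECDSAKey)
	if err != nil {
		log.Fatal(err)
	}

	// Encode the key as an encrypted PKCS#8 PEM block ("ENCRYPTED PRIVATE KEY"),
	// with the role and GUN recorded as PEM headers.
	pemBytes, err := utils.ConvertPrivateKeyToPKCS8(key, data.RoleName("targets"), data.GUN("example.com/library/app"), "opensesame")
	if err != nil {
		log.Fatal(err)
	}

	// The role and GUN headers can be read back without decrypting the key material.
	role, gun, err := utils.ExtractPrivateKeyAttributes(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("role=%s gun=%s\n", role, gun)

	// Parsing an encrypted PKCS#8 block requires the same passphrase.
	parsed, err := utils.ParsePEMPrivateKey(pemBytes, "opensesame")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key ID after round trip: %s\n", parsed.ID())
}

As the hunks show, the legacy "RSA PRIVATE KEY" / "EC PRIVATE KEY" / "ED25519 PRIVATE KEY" PEM types are still parsed, but only outside FIPS mode, while PKCS#8 blocks are accepted in both modes; new keys are written exclusively as (optionally encrypted) PKCS#8, which appears to be why KeyToPEM and EncryptPrivateKey are replaced by ConvertPrivateKeyToPKCS8 in this upgrade.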