bump up distribution to v2.8.2 on release-2.8.0 (#18769)

Signed-off-by: yminer <yminer@vmware.com>
MinerYang 2023-05-31 14:11:07 +08:00 committed by GitHub
parent fade2e383a
commit 450e7e7884
35 changed files with 263 additions and 231 deletions

View File

@ -104,14 +104,14 @@ PKGVERSIONTAG=dev
PREPARE_VERSION_NAME=versions
#versions
REGISTRYVERSION=v2.8.0-patch-redis
REGISTRYVERSION=v2.8.2-patch-redis
NOTARYVERSION=v0.6.1
NOTARYMIGRATEVERSION=v4.11.0
TRIVYVERSION=v0.40.0
TRIVYADAPTERVERSION=v0.30.11
# version of registry for pulling the source code
REGISTRY_SRC_TAG=v2.8.0
REGISTRY_SRC_TAG=v2.8.2
# dependency binaries
NOTARYURL=https://storage.googleapis.com/harbor-builds/bin/notary/release-${NOTARYVERSION}/binary-bundle.tgz

View File

@ -22,12 +22,6 @@ cur=$PWD
TEMP=`mktemp -d ${TMPDIR-/tmp}/distribution.XXXXXX`
git clone -b $VERSION https://github.com/distribution/distribution.git $TEMP
# add patch 2815
echo 'add patch https://github.com/distribution/distribution/pull/2815 ...'
cd $TEMP
wget https://github.com/distribution/distribution/pull/2815.patch
git apply 2815.patch
# add patch redis
cd $TEMP
git apply $cur/redis.patch

View File

@ -198,7 +198,7 @@ require (
replace (
github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible
github.com/docker/distribution => github.com/distribution/distribution v2.8.1+incompatible
github.com/docker/distribution => github.com/distribution/distribution v2.8.2+incompatible
github.com/goharbor/harbor => ../
github.com/gomodule/redigo => github.com/gomodule/redigo v1.8.8
google.golang.org/api => google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff

View File

@ -383,8 +383,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dhui/dktest v0.3.7 h1:jWjWgHAPDAdqgUr7lAsB3bqB2DKWC3OaA+isfekjRew=
github.com/dhui/dktest v0.3.7/go.mod h1:nYMOkafiA07WchSwKnKFUSbGMb2hMm5DrCGiXYG6gwM=
github.com/distribution/distribution v2.8.1+incompatible h1:8iXUoOqRPx30bhzIEPUmNIqlmBlWdrieW1bqr6LrX30=
github.com/distribution/distribution v2.8.1+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
github.com/distribution/distribution v2.8.2+incompatible h1:k9+4DKdOG+quPFZXT/mUsiQrGu9vYCp+dXpuPkuqhk8=
github.com/distribution/distribution v2.8.2+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=

View File

@ -0,0 +1 @@
bin/

View File

@ -18,3 +18,10 @@ run:
deadline: 2m
skip-dirs:
- vendor
issues:
exclude-rules:
# io/ioutil is deprecated, but won't be removed until Go v2. It's safe to ignore for the release/2.8 branch.
- text: "SA1019: \"io/ioutil\" has been deprecated since Go 1.16"
linters:
- staticcheck
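
For reference, a minimal sketch (a hypothetical file, not part of this commit) of the kind of code the new exclude rule tolerates: staticcheck would otherwise report SA1019 for the deprecated io/ioutil package on this branch.

package example

import "io/ioutil" // staticcheck SA1019: deprecated since Go 1.16; silenced by the exclude rule above

// ReadConfig keeps using the deprecated helper; release-2.8 silences the
// finding instead of migrating the call sites to os/io.
func ReadConfig(path string) ([]byte, error) {
	return ioutil.ReadFile(path)
}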

View File

@ -44,6 +44,8 @@ Thomas Berger <loki@lokis-chaos.de> Thomas Berger <tbe@users.noreply.github.com>
Samuel Karp <skarp@amazon.com> Samuel Karp <samuelkarp@users.noreply.github.com>
Justin Cormack <justin.cormack@docker.com>
sayboras <sayboras@yahoo.com>
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
Hayley Swimelar <hswimelar@gmail.com>
Jose D. Gomez R <jose.gomez@suse.com>
Shengjing Zhu <zhsj@debian.org>
Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com>

View File

@ -1,49 +1,59 @@
# syntax=docker/dockerfile:1.3
# syntax=docker/dockerfile:1
ARG GO_VERSION=1.16.15
ARG GORELEASER_XX_VERSION=1.2.5
ARG GO_VERSION=1.19.9
ARG ALPINE_VERSION=3.16
ARG XX_VERSION=1.2.1
FROM --platform=$BUILDPLATFORM crazymax/goreleaser-xx:${GORELEASER_XX_VERSION} AS goreleaser-xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base
COPY --from=goreleaser-xx / /
RUN apk add --no-cache file git
WORKDIR /go/src/github.com/docker/distribution
FROM base AS build
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
COPY --from=xx / /
RUN apk add --no-cache bash coreutils file git
ENV GO111MODULE=auto
ENV CGO_ENABLED=0
# GIT_REF is used by goreleaser-xx to handle the proper git ref when available.
# It will fallback to the working tree info if empty and use "git tag --points-at"
# or "git describe" to define the version info.
ARG GIT_REF
ARG TARGETPLATFORM
ARG PKG="github.com/distribution/distribution"
ARG BUILDTAGS="include_oss include_gcs"
RUN --mount=type=bind,rw \
--mount=type=cache,target=/root/.cache/go-build \
--mount=target=/go/pkg/mod,type=cache \
goreleaser-xx --debug \
--name="registry" \
--dist="/out" \
--main="./cmd/registry" \
--flags="-v" \
--ldflags="-s -w -X '$PKG/version.Version={{.Version}}' -X '$PKG/version.Revision={{.Commit}}' -X '$PKG/version.Package=$PKG'" \
--tags="$BUILDTAGS" \
--files="LICENSE" \
--files="README.md"
WORKDIR /go/src/github.com/docker/distribution
FROM scratch AS artifact
COPY --from=build /out/*.tar.gz /
COPY --from=build /out/*.zip /
COPY --from=build /out/*.sha256 /
FROM base AS version
ARG PKG="github.com/docker/distribution"
RUN --mount=target=. \
VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
echo -n "${VERSION}" | tee /tmp/.version;
FROM base AS build
ARG TARGETPLATFORM
ARG LDFLAGS="-s -w"
ARG BUILDTAGS="include_oss include_gcs"
RUN --mount=type=bind,target=/go/src/github.com/docker/distribution,rw \
--mount=type=cache,target=/root/.cache/go-build \
--mount=target=/go/pkg/mod,type=cache \
--mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \
set -x ; xx-go build -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \
&& xx-verify --static /usr/bin/registry
FROM scratch AS binary
COPY --from=build /usr/local/bin/registry* /
COPY --from=build /usr/bin/registry /
FROM alpine:3.14
FROM base AS releaser
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
WORKDIR /work
RUN --mount=from=binary,target=/build \
--mount=type=bind,target=/src \
--mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \
VERSION=$(cat /tmp/.version) \
&& mkdir -p /out \
&& cp /build/registry /src/README.md /src/LICENSE . \
&& tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \
&& sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256"
FROM scratch AS artifact
COPY --from=releaser /out /
FROM alpine:${ALPINE_VERSION}
RUN apk add --no-cache ca-certificates
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
COPY --from=build /usr/local/bin/registry /bin/registry
COPY --from=binary /registry /bin/registry
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]

View File

@ -50,7 +50,7 @@ version/version.go:
check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
@echo "$(WHALE) $@"
golangci-lint run
@GO111MODULE=off golangci-lint run
test: ## run tests, except integration test with test.short
@echo "$(WHALE) $@"

View File

@ -193,7 +193,8 @@ type Configuration struct {
} `yaml:"pool,omitempty"`
} `yaml:"redis,omitempty"`
Health Health `yaml:"health,omitempty"`
Health Health `yaml:"health,omitempty"`
Catalog Catalog `yaml:"catalog,omitempty"`
Proxy Proxy `yaml:"proxy,omitempty"`
@ -244,6 +245,16 @@ type Configuration struct {
} `yaml:"policy,omitempty"`
}
// Catalog is composed of MaxEntries.
// Catalog endpoint (/v2/_catalog) configuration, it provides the configuration
// options to control the maximum number of entries returned by the catalog endpoint.
type Catalog struct {
// Max number of entries returned by the catalog endpoint. Requesting n entries
// to the catalog endpoint will return at most MaxEntries entries.
// An empty or a negative value will set a default of 1000 maximum entries by default.
MaxEntries int `yaml:"maxentries,omitempty"`
}
// LogHook is composed of hook Level and Type.
// After hooks configuration, it can execute the next handling automatically,
// when defined levels of log message emitted.
@ -584,7 +595,7 @@ type Events struct {
IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events
}
//Ignore configures mediaTypes and actions of the event, that it won't be propagated
// Ignore configures mediaTypes and actions of the event, that it won't be propagated
type Ignore struct {
MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
Actions []string `yaml:"actions"` // ignore action types
@ -670,6 +681,11 @@ func Parse(rd io.Reader) (*Configuration, error) {
if v0_1.Loglevel != Loglevel("") {
v0_1.Loglevel = Loglevel("")
}
if v0_1.Catalog.MaxEntries <= 0 {
v0_1.Catalog.MaxEntries = 1000
}
if v0_1.Storage.Type() == "" {
return nil, errors.New("no storage configuration provided")
}
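
A minimal sketch of how the new catalog option behaves, assuming the usual distribution YAML layout (version and storage keys) alongside the new catalog.maxentries key; an omitted, zero, or negative value falls back to the 1000-entry default applied in Parse above.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/configuration"
)

func main() {
	// Hypothetical config: catalog.maxentries is the knob added in this change.
	yml := `
version: 0.1
storage:
  inmemory: {}
catalog:
  maxentries: 500
`
	cfg, err := configuration.Parse(strings.NewReader(yml))
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Catalog.MaxEntries) // 500; omitted or <= 0 becomes 1000
}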

View File

@ -4,68 +4,68 @@
//
// The easiest way to get started is to get the background context:
//
// ctx := context.Background()
// ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// ctx := context.WithVersion(context.Background(), version)
// ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
// # Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// GetLogger(ctx).Infof("something interesting happened")
// GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// ctx := context.Context(context.Background(), "version", version)
// GetLogger(ctx, "version").Infof("this log message has a version field")
// ctx := context.Context(context.Background(), "version", version)
// GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// GetLogger(ctx).Infof("this log message has a version field")
// GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
// # HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
@ -73,13 +73,13 @@
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// ctx = WithRequest(ctx, req)
// GetRequestLogger(ctx).Infof("request variables")
// ctx = WithRequest(ctx, req)
// GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one

View File

@ -246,11 +246,7 @@ func (ctx *muxVarsContext) Value(key interface{}) interface{} {
return ctx.vars
}
if strings.HasPrefix(keyStr, "vars.") {
keyStr = strings.TrimPrefix(keyStr, "vars.")
}
if v, ok := ctx.vars[keyStr]; ok {
if v, ok := ctx.vars[strings.TrimPrefix(keyStr, "vars.")]; ok {
return v
}
}
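
The refactor above relies on strings.TrimPrefix being a no-op when the prefix is absent, so the guarded branch and the map lookup collapse into one expression; a tiny illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.TrimPrefix("vars.name", "vars.")) // "name"
	fmt.Println(strings.TrimPrefix("name", "vars."))      // "name" (unchanged)
}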

View File

@ -24,16 +24,16 @@ import (
//
// Here is an example of the usage:
//
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.

View File

@ -1,15 +1,3 @@
// GITHUB_REF is the actual ref that triggers the workflow
// https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables
variable "GITHUB_REF" {
default = ""
}
target "_common" {
args = {
GIT_REF = GITHUB_REF
}
}
group "default" {
targets = ["image-local"]
}
@ -20,13 +8,11 @@ target "docker-metadata-action" {
}
target "binary" {
inherits = ["_common"]
target = "binary"
output = ["./bin"]
}
target "artifact" {
inherits = ["_common"]
target = "artifact"
output = ["./bin"]
}
@ -43,8 +29,13 @@ target "artifact-all" {
]
}
// Special target: https://github.com/docker/metadata-action#bake-definition
target "docker-metadata-action" {
tags = ["registry:local"]
}
target "image" {
inherits = ["_common", "docker-metadata-action"]
inherits = ["docker-metadata-action"]
}
target "image-local" {

View File

@ -13,29 +13,29 @@
// particularly useful for checks that verify upstream connectivity or
// database status, since they might take a long time to return/timeout.
//
// Installing
// # Installing
//
// To install health, just import it in your application:
//
// import "github.com/docker/distribution/health"
// import "github.com/docker/distribution/health"
//
// You can also (optionally) import "health/api" that will add two convenience
// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add
// "manual" checks that allow the service to quickly be brought in/out of
// rotation.
//
// import _ "github.com/docker/distribution/health/api"
// import _ "github.com/docker/distribution/health/api"
//
// # curl localhost:5001/debug/health
// {}
// # curl -X POST localhost:5001/debug/health/down
// # curl localhost:5001/debug/health
// {"manual_http_status":"Manual Check"}
// # curl localhost:5001/debug/health
// {}
// # curl -X POST localhost:5001/debug/health/down
// # curl localhost:5001/debug/health
// {"manual_http_status":"Manual Check"}
//
// After importing these packages to your main application, you can start
// registering checks.
//
// Registering Checks
// # Registering Checks
//
// The recommended way of registering checks is using a periodic Check.
// PeriodicChecks run on a certain schedule and asynchronously update the
@ -45,22 +45,22 @@
// A trivial example of a check that runs every 5 seconds and shuts down our
// server if the current minute is even, could be added as follows:
//
// func currentMinuteEvenCheck() error {
// m := time.Now().Minute()
// if m%2 == 0 {
// return errors.New("Current minute is even!")
// }
// return nil
// }
// func currentMinuteEvenCheck() error {
// m := time.Now().Minute()
// if m%2 == 0 {
// return errors.New("Current minute is even!")
// }
// return nil
// }
//
// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
//
// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to
// implement the exact same check, but add a threshold of failures after which
// the check will be unhealthy. This is particularly useful for flaky Checks,
// ensuring some stability of the service when handling them.
//
// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
//
// The lowest-level way to interact with the health package is calling
// "Register" directly. Register allows you to pass in an arbitrary string and
@ -72,7 +72,7 @@
// Assuming you wish to register a method called "currentMinuteEvenCheck()
// error" you could do that by doing:
//
// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
//
// CheckFunc is a convenience type that implements Checker.
//
@ -80,11 +80,11 @@
// and the convenience method RegisterFunc. An example that makes the status
// endpoint always return an error:
//
// health.RegisterFunc("my_check", func() error {
// return Errors.new("This is an error!")
// }))
// health.RegisterFunc("my_check", func() error {
// return Errors.new("This is an error!")
// }))
//
// Examples
// # Examples
//
// You could also use the health checker mechanism to ensure your application
// only comes up if certain conditions are met, or to allow the developer to
@ -92,35 +92,35 @@
// database connectivity and immediately takes the server out of rotation on
// err:
//
// updater = health.NewStatusUpdater()
// health.RegisterFunc("database_check", func() error {
// return updater.Check()
// }))
// updater = health.NewStatusUpdater()
// health.RegisterFunc("database_check", func() error {
// return updater.Check()
// }))
//
// conn, err := Connect(...) // database call here
// if err != nil {
// updater.Update(errors.New("Error connecting to the database: " + err.Error()))
// }
// conn, err := Connect(...) // database call here
// if err != nil {
// updater.Update(errors.New("Error connecting to the database: " + err.Error()))
// }
//
// You can also use the predefined Checkers that come included with the health
// package. First, import the checks:
//
// import "github.com/docker/distribution/health/checks
// import "github.com/docker/distribution/health/checks
//
// After that you can make use of any of the provided checks. An example of
// using a `FileChecker` to take the application out of rotation if a certain
// file exists can be done as follows:
//
// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
//
// After registering the check, it is trivial to take an application out of
// rotation from the console:
//
// # curl localhost:5001/debug/health
// {}
// # touch /tmp/disable
// # curl localhost:5001/debug/health
// {"fileChecker":"file exists"}
// # curl localhost:5001/debug/health
// {}
// # touch /tmp/disable
// # curl localhost:5001/debug/health
// {"fileChecker":"file exists"}
//
// FileChecker only accepts absolute or relative file path. It does not work
// properly with tilde(~). You should make sure that the application has
@ -132,5 +132,5 @@
// "HTTPChecker", but ensure that you only mark the test unhealthy if there
// are a minimum of two failures in a row:
//
// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
package health

View File

@ -3,13 +3,13 @@
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/

View File

@ -8,28 +8,27 @@
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
//
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
// accessController, _ := auth.GetAccessController("silly", options)
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
// accessController, _ := auth.GetAccessController("silly", options)
//
// This `accessController` can then be used in a request handler like so:
//
// func updateOrder(w http.ResponseWriter, r *http.Request) {
// orderNumber := r.FormValue("orderNumber")
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// access := auth.Access{Resource: resource, Action: "update"}
// func updateOrder(w http.ResponseWriter, r *http.Request) {
// orderNumber := r.FormValue("orderNumber")
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// access := auth.Access{Resource: resource, Action: "update"}
//
// if ctx, err := accessController.Authorized(ctx, access); err != nil {
// if challenge, ok := err.(auth.Challenge) {
// // Let the challenge write the response.
// challenge.SetHeaders(r, w)
// w.WriteHeader(http.StatusUnauthorized)
// return
// } else {
// // Some other error.
// }
// if ctx, err := accessController.Authorized(ctx, access); err != nil {
// if challenge, ok := err.(auth.Challenge) {
// // Let the challenge write the response.
// challenge.SetHeaders(r, w)
// w.WriteHeader(http.StatusUnauthorized)
// return
// } else {
// // Some other error.
// }
// }
//
// }
// }
package auth
import (

View File

@ -185,13 +185,15 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
// VerifySigningKey attempts to get the key which was used to sign this token.
// The token header should contain either of these 3 fields:
// `x5c` - The x509 certificate chain for the signing key. Needs to be
// verified.
// `jwk` - The JSON Web Key representation of the signing key.
// May contain its own `x5c` field which needs to be verified.
// `kid` - The unique identifier for the key. This library interprets it
// as a libtrust fingerprint. The key itself can be looked up in
// the trustedKeys field of the given verify options.
//
// `x5c` - The x509 certificate chain for the signing key. Needs to be
// verified.
// `jwk` - The JSON Web Key representation of the signing key.
// May contain its own `x5c` field which needs to be verified.
// `kid` - The unique identifier for the key. This library interprets it
// as a libtrust fingerprint. The key itself can be looked up in
// the trustedKeys field of the given verify options.
//
// Each of these methods are tried in that order of preference until the
// signing key is found or an error is returned.
func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {

View File

@ -180,7 +180,6 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
}
req.Header.Add("Accept-Encoding", "identity")
resp, err := hrs.client.Do(req)
if err != nil {
return nil, err

View File

@ -1,3 +1,4 @@
//go:build noresumabledigest
// +build noresumabledigest
package storage

View File

@ -1,3 +1,4 @@
//go:build !noresumabledigest
// +build !noresumabledigest
package storage

View File

@ -6,14 +6,14 @@
// struct such that calls are proxied through this implementation. First,
// declare the internal driver, as follows:
//
// type driver struct { ... internal ...}
// type driver struct { ... internal ...}
//
// The resulting type should implement StorageDriver such that it can be the
// target of a Base struct. The exported type can then be declared as follows:
//
// type Driver struct {
// Base
// }
// type Driver struct {
// Base
// }
//
// Because Driver embeds Base, it effectively implements Base. If the driver
// needs to intercept a call, before going to base, Driver should implement
@ -23,15 +23,15 @@
// To further shield the embed from other packages, it is recommended to
// employ a private embed struct:
//
// type baseEmbed struct {
// base.Base
// }
// type baseEmbed struct {
// base.Base
// }
//
// Then, declare driver to embed baseEmbed, rather than Base directly:
//
// type Driver struct {
// baseEmbed
// }
// type Driver struct {
// baseEmbed
// }
//
// The type now implements StorageDriver, proxying through Base, without
// exporting an unnecessary field.

View File

@ -145,7 +145,7 @@ func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileIn
}
// List returns a list of the objects that are direct descendants of the
//given path.
// given path.
func (r *regulator) List(ctx context.Context, path string) ([]string, error) {
r.enter()
defer r.exit()

View File

@ -10,6 +10,7 @@
// Note that the contents of incomplete uploads are not accessible even though
// Stat returns their length
//
//go:build include_gcs
// +build include_gcs
package gcs

View File

@ -279,6 +279,9 @@ func (f *file) sectionReader(offset int64) io.Reader {
}
func (f *file) ReadAt(p []byte, offset int64) (n int, err error) {
if offset >= int64(len(f.data)) {
return 0, io.EOF
}
return copy(p, f.data[offset:]), nil
}
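
The added bounds check follows the io.ReaderAt contract: a read at or past the end of the data must report io.EOF rather than (0, nil), which is what wrappers such as io.SectionReader expect. A small standalone illustration of that contract:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	r := bytes.NewReader([]byte("abc"))
	buf := make([]byte, 2)
	n, err := r.ReadAt(buf, 3) // offset at the end of the data
	fmt.Println(n, err)        // 0 EOF
}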

View File

@ -1,6 +1,5 @@
// Package middleware - cloudfront wrapper for storage libs
// N.B. currently only works with S3, not arbitrary sites
//
package middleware
import (
@ -38,7 +37,9 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{}
// Optional options: ipFilteredBy, awsregion
// ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP, default value. "aws", only aws IP goes
// to S3 directly. "awsregion", only regions listed in awsregion options goes to S3 directly
//
// to S3 directly. "awsregion", only regions listed in awsregion options goes to S3 directly
//
// awsregion: a comma separated string of AWS regions.
func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
// parse baseurl

View File

@ -7,6 +7,7 @@
// Because OSS is a key, value store the Stat call does not support last modification
// time for directories (directories are an abstraction for key, value stores)
//
//go:build include_oss
// +build include_oss
package oss

View File

@ -82,7 +82,7 @@ var validRegions = map[string]struct{}{}
// validObjectACLs contains known s3 object Acls
var validObjectACLs = map[string]struct{}{}
//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
// DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
type DriverParameters struct {
AccessKey string
SecretKey string
@ -549,9 +549,9 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
func (d *driver) Writer(ctx context.Context, path string, appendParam bool) (storagedriver.FileWriter, error) {
key := d.s3Path(path)
if !append {
if !appendParam {
// TODO (brianbland): cancel other uploads at this path
resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
@ -574,7 +574,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
if err != nil {
return nil, parseError(path, err)
}
var allParts []*s3.Part
for _, multi := range resp.Uploads {
if key != *multi.Key {
continue
@ -587,11 +587,20 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
if err != nil {
return nil, parseError(path, err)
}
var multiSize int64
for _, part := range resp.Parts {
multiSize += *part.Size
allParts = append(allParts, resp.Parts...)
for *resp.IsTruncated {
resp, err = d.S3.ListParts(&s3.ListPartsInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
UploadId: multi.UploadId,
PartNumberMarker: resp.NextPartNumberMarker,
})
if err != nil {
return nil, parseError(path, err)
}
allParts = append(allParts, resp.Parts...)
}
return d.newWriter(key, *multi.UploadId, resp.Parts), nil
return d.newWriter(key, *multi.UploadId, allParts), nil
}
return nil, storagedriver.PathNotFoundError{Path: path}
}
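
For context, a minimal sketch of the pagination pattern the loop above implements, assuming aws-sdk-go v1 (S3 returns at most 1000 parts per ListParts call, so truncated pages must be followed via NextPartNumberMarker); the helper name and package are hypothetical.

package s3util

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// listAllParts collects every part of a multipart upload, following
// truncated ListParts responses until the final page is reached.
func listAllParts(svc s3iface.S3API, bucket, key, uploadID string) ([]*s3.Part, error) {
	input := &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	}
	var all []*s3.Part
	for {
		resp, err := svc.ListParts(input)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Parts...)
		if !aws.BoolValue(resp.IsTruncated) {
			return all, nil
		}
		input.PartNumberMarker = resp.NextPartNumberMarker
	}
}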

View File

@ -12,7 +12,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
//ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
// ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
type ocischemaManifestHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore

View File

@ -23,25 +23,25 @@ const (
//
// The path layout in the storage backend is roughly as follows:
//
// <root>/v2
// -> repositories/
// -><name>/
// -> _manifests/
// revisions
// -> <manifest digest path>
// -> link
// tags/<tag>
// -> current/link
// -> index
// -> <algorithm>/<hex digest>/link
// -> _layers/
// <layer links to blob store>
// -> _uploads/<id>
// data
// startedat
// hashstates/<algorithm>/<offset>
// -> blob/<algorithm>
// <split directory content addressable storage>
// <root>/v2
// -> repositories/
// -><name>/
// -> _manifests/
// revisions
// -> <manifest digest path>
// -> link
// tags/<tag>
// -> current/link
// -> index
// -> <algorithm>/<hex digest>/link
// -> _layers/
// <layer links to blob store>
// -> _uploads/<id>
// data
// startedat
// hashstates/<algorithm>/<offset>
// -> blob/<algorithm>
// <split directory content addressable storage>
//
// The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data
@ -71,35 +71,35 @@ const (
//
// Manifests:
//
// manifestRevisionsPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/
// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
// manifestRevisionsPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/
// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
//
// Tags:
//
// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/
// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/
// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/
// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/
// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
//
// Blobs:
// Blobs:
//
// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
//
// Uploads:
//
// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data
// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data
// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
//
// Blob Store:
//
// blobsPathSpec: <root>/v2/blobs/
// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
@ -339,11 +339,11 @@ func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}
// blob id. The blob link will contain a content addressable blob id reference
// into the blob store. The format of the contents is as follows:
//
// <algorithm>:<hex digest of layer data>
// <algorithm>:<hex digest of layer data>
//
// The following example of the file contents is more illustrative:
//
// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
//
// This indicates that there is a blob with the id/digest, calculated via
// sha256 that can be fetched from the blob store.
@ -429,13 +429,12 @@ func (repositoriesRootPathSpec) pathSpec() {}
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
// <algorithm>/<hex digest>
// <algorithm>/<hex digest>
//
// If multilevel is true, the first two bytes of the digest will separate
// groups of digest folder. It will be as follows:
//
// <algorithm>/<first two bytes of digest>/<full digest>
//
// <algorithm>/<first two bytes of digest>/<full digest>
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
if err := dgst.Validate(); err != nil {
return nil, err

View File

@ -18,7 +18,7 @@ var (
errInvalidURL = errors.New("invalid URL on layer")
)
//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
// schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore

View File

@ -9,13 +9,12 @@ import (
// FprintVersion outputs the version string to the writer, in the following
// format, followed by a newline:
//
// <cmd> <project> <version>
// <cmd> <project> <version>
//
// For example, a binary "registry" built from github.com/docker/distribution
// with version "v2.0" would print the following:
//
// registry github.com/docker/distribution v2.0
//
// registry github.com/docker/distribution v2.0
func FprintVersion(w io.Writer) {
fmt.Fprintln(w, os.Args[0], Package, Version)
}

View File

@ -8,7 +8,7 @@ var Package = "github.com/docker/distribution"
// the latest release tag by hand, always suffixed by "+unknown". During
// build, it will be replaced by the actual version. The value here will be
// used if the registry is run after a go get based install.
var Version = "v2.8.1+unknown"
var Version = "v2.8.2+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

View File

@ -17,7 +17,7 @@ var Package = "$(go list)"
// Version indicates which version of the binary is running. This is set to
// the latest release tag by hand, always suffixed by "+unknown". During
// build, it will be replaced by the actual version. The value here will be
// used if the registry is run after a go get based install.
// used if the registry is run after a go install based install.
var Version = "$(git describe --match 'v[0-9]*' --dirty='.m' --always)+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build

View File

@ -206,7 +206,7 @@ github.com/dghubble/sling
github.com/dgryski/go-rendezvous
# github.com/dnaeon/go-vcr v1.2.0
## explicit; go 1.15
# github.com/docker/distribution v2.8.1+incompatible => github.com/distribution/distribution v2.8.1+incompatible
# github.com/docker/distribution v2.8.1+incompatible => github.com/distribution/distribution v2.8.2+incompatible
## explicit
github.com/docker/distribution
github.com/docker/distribution/configuration
@ -1043,7 +1043,7 @@ sigs.k8s.io/structured-merge-diff/v4/value
## explicit; go 1.12
sigs.k8s.io/yaml
# github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible
# github.com/docker/distribution => github.com/distribution/distribution v2.8.1+incompatible
# github.com/docker/distribution => github.com/distribution/distribution v2.8.2+incompatible
# github.com/goharbor/harbor => ../
# github.com/gomodule/redigo => github.com/gomodule/redigo v1.8.8
# google.golang.org/api => google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff