Merge master into dev

Conflicts:
	.travis.yml
	Dockerfile.job
	vendor/vendor.json
Wenkai Yin 2016-08-15 11:30:06 +08:00
commit 4ef5c1f98a
183 changed files with 20450 additions and 4787 deletions

View File

@ -54,7 +54,7 @@ install:
- go get -d github.com/go-sql-driver/mysql
- go get github.com/golang/lint/golint
- go get github.com/GeertJohan/fgt
# - sudo apt-get install -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" docker-engine=1.11.1-0~trusty
# - sudo rm /usr/local/bin/docker-compose
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
- chmod +x docker-compose

View File

@ -5,12 +5,11 @@ MAINTAINER jiangd@vmware.com
RUN apt-get update \
&& apt-get install -y libldap2-dev \
&& rm -r /var/lib/apt/lists/*
RUN go get -d github.com/docker/distribution \
&& go get -d github.com/docker/libtrust \
&& go get -d github.com/go-sql-driver/mysql
COPY . /go/src/github.com/vmware/harbor
WORKDIR /go/src/github.com/vmware/harbor/jobservice
RUN go build -v -a -o /go/bin/harbor_jobservice \
&& chmod u+x /go/bin/harbor_jobservice
WORKDIR /go/bin/

View File

@ -7,14 +7,9 @@ RUN apt-get update \
&& rm -r /var/lib/apt/lists/*
COPY . /go/src/github.com/vmware/harbor
#golang.org is blocked in China
COPY ./vendor/golang.org /go/src/golang.org
WORKDIR /go/src/github.com/vmware/harbor/ui
RUN go get -d github.com/docker/distribution \
&& go get -d github.com/docker/libtrust \
&& go get -d github.com/go-sql-driver/mysql \
&& go build -v -a -o /go/bin/harbor_ui
RUN go build -v -a -o /go/bin/harbor_ui
ENV MYSQL_USR root \
MYSQL_PWD root \

View File

@ -86,7 +86,28 @@ func (b *BaseAPI) DecodeJSONReqAndValidate(v interface{}) {
// ValidateUser checks if the request is triggered by a valid user
func (b *BaseAPI) ValidateUser() int {
userID, needsCheck, ok := b.GetUserIDForRequest()
if !ok {
log.Warning("No user id in session, canceling request")
b.CustomAbort(http.StatusUnauthorized, "")
}
if needsCheck {
u, err := dao.GetUser(models.User{UserID: userID})
if err != nil {
log.Errorf("Error occurred in GetUser, error: %v", err)
b.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if u == nil {
log.Warningf("User was deleted already, user id: %d, canceling request.", userID)
b.CustomAbort(http.StatusUnauthorized, "")
}
}
return userID
}
// GetUserIDForRequest tries to get user ID from basic auth header and session.
// It returns the user ID, whether further verification is needed (when the ID comes from the session), and whether the lookup succeeded.
func (b *BaseAPI) GetUserIDForRequest() (int, bool, bool) {
username, password, ok := b.Ctx.Request.BasicAuth()
if ok {
log.Infof("Requst with Basic Authentication header, username: %s", username)
@ -99,25 +120,17 @@ func (b *BaseAPI) ValidateUser() int {
user = nil
}
if user != nil {
return user.UserID
// User login successfully no further check required.
return user.UserID, false, true
}
}
sessionUserID := b.GetSession("userId")
if sessionUserID == nil {
log.Warning("No user id in session, canceling request")
b.CustomAbort(http.StatusUnauthorized, "")
sessionUserID, ok := b.GetSession("userId").(int)
if ok {
// The ID is from session
return sessionUserID, true, true
}
userID := sessionUserID.(int)
u, err := dao.GetUser(models.User{UserID: userID})
if err != nil {
log.Errorf("Error occurred in GetUser, error: %v", err)
b.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if u == nil {
log.Warningf("User was deleted already, user id: %d, canceling request.", userID)
b.CustomAbort(http.StatusUnauthorized, "")
}
return userID
log.Debug("No valid user id in session.")
return 0, false, false
}
// Redirect does redirection to resource URI with http header status code.
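The refactor above splits authentication in two: `GetUserIDForRequest` resolves the ID (basic auth header first, then session) and reports whether a further existence check is needed, while `ValidateUser` performs that check. A minimal sketch of the calling pattern, mirroring `ValidateUser` (the `SomeAPI` type is hypothetical; everything else is from this diff):

```go
// Sketch: consuming GetUserIDForRequest directly; SomeAPI is a
// hypothetical type embedding BaseAPI.
func (a *SomeAPI) Get() {
	userID, needsCheck, ok := a.GetUserIDForRequest()
	if !ok {
		// Neither basic auth nor the session yielded a user ID.
		a.CustomAbort(http.StatusUnauthorized, "")
	}
	if needsCheck {
		// The ID came from the session; verify the user still exists.
		u, err := dao.GetUser(models.User{UserID: userID})
		if err != nil {
			a.CustomAbort(http.StatusInternalServerError, "Internal error.")
		}
		if u == nil {
			a.CustomAbort(http.StatusUnauthorized, "")
		}
	}
	// ... proceed with userID ...
}
```

This is exactly the split the `SearchAPI` change below relies on: a caller that tolerates anonymous requests can substitute a fallback ID instead of aborting.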

View File

@ -132,6 +132,10 @@ func (ra *RepositoryAPI) Delete() {
ra.CustomAbort(http.StatusInternalServerError, "")
}
if project == nil {
ra.CustomAbort(http.StatusNotFound, fmt.Sprintf("project %s not found", projectName))
}
if project.Public == 0 {
userID := ra.ValidateUser()
if !hasProjectAdminRole(userID, project.ProjectID) {
@ -290,6 +294,10 @@ func (ra *RepositoryAPI) GetManifests() {
ra.CustomAbort(http.StatusInternalServerError, "")
}
if project == nil {
ra.CustomAbort(http.StatusNotFound, fmt.Sprintf("project %s not found", projectName))
}
if project.Public == 0 {
userID := ra.ValidateUser()
if !checkProjectPermission(userID, project.ProjectID) {

View File

@ -39,7 +39,7 @@ type searchResult struct {
// Get ...
func (s *SearchAPI) Get() {
userID, ok := s.GetSession("userId").(int)
userID, _, ok := s.GetUserIDForRequest()
if !ok {
userID = dao.NonExistUserID
}

View File

@ -32,7 +32,6 @@ paths:
description: Search parameter for project and repository name.
required: true
type: string
format: string
tags:
- Products
responses:
@ -55,7 +54,6 @@ paths:
description: Project name for filtering results.
required: false
type: string
format: string
- name: is_public
in: query
description: Public sign for filtering projects.
@ -71,6 +69,8 @@ paths:
type: array
items:
$ref: '#/definitions/Project'
401:
description: User needs to log in first.
500:
description: Internal errors.
head:
@ -83,7 +83,6 @@ paths:
description: Project name for checking exists.
required: true
type: string
format: string
tags:
- Products
responses:
@ -119,6 +118,31 @@ paths:
description: Project name already exists.
500:
description: Unexpected internal errors.
/projects/{project_id}:
get:
summary: Return specific project detail information
description: |
This endpoint returns specific project information by project ID.
parameters:
- name: project_id
in: path
description: Project ID for filtering results.
required: true
type: integer
format: int64
tags:
- Products
responses:
200:
description: Return matched project information.
schema:
type: array
items:
$ref: '#/definitions/Project'
401:
description: User needs to log in first.
500:
description: Internal errors.
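The new route can be exercised with any HTTP client; a minimal Go sketch (the host, credentials, and `/api` prefix are assumptions for illustration, not part of this commit):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumed Harbor endpoint and credentials.
	req, err := http.NewRequest(http.MethodGet, "http://harbor.example.com/api/projects/1", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("admin", "Harbor12345")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // 200 with project info, 401 if not logged in
}
```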
/projects/{project_id}/publicity:
put:
summary: Update properties for a selected project.
@ -128,7 +152,7 @@ paths:
- name: project_id
in: path
type: integer
format: int32
format: int64
required: true
description: Selected project ID.
- name: project
@ -161,7 +185,7 @@ paths:
- name: project_id
in: path
type: integer
format: int32
format: int64
required: true
description: Relevant project ID
- name: access_log
@ -193,7 +217,7 @@ paths:
- name: project_id
in: path
type: integer
format: int32
format: int64
required: true
description: Relevant project ID.
tags:
@ -223,7 +247,7 @@ paths:
- name: project_id
in: path
type: integer
format: int32
format: int64
required: true
description: Relevant project ID.
- name: roles
@ -257,13 +281,13 @@ paths:
- name: project_id
in: path
type: integer
format: int32
format: int64
required: true
description: Relevant project ID
- name: user_id
in: path
type: integer
format: int32
format: int
required: true
description: Relevant user ID
tags:
@ -293,13 +317,13 @@ paths:
- name: project_id
in: path
type: integer
format: int32
format: int64
required: true
description: Relevant project ID.
- name: user_id
in: path
type: integer
format: int32
format: int
required: true
description: Relevant user ID.
- name: roles
@ -330,13 +354,13 @@ paths:
- name: project_id
in: path
type: integer
format: int32
format: int64
required: true
description: Relevant project ID.
- name: user_id
in: path
type: integer
format: int32
format: int
required: true
description: Relevant user ID.
tags:
@ -377,10 +401,9 @@ paths:
description: |
This endpoint is for users to search registered users, with support for filtering results by username. Note that for now this operation is restricted to administrators.
parameters:
- name: user_name
- name: username
in: query
type: string
format: string
required: false
description: Username for filtering results.
tags:
@ -431,7 +454,7 @@ paths:
- name: user_id
in: path
type: integer
format: int32
format: int
required: true
description: Registered user ID
- name: profile
@ -464,7 +487,7 @@ paths:
- name: user_id
in: path
type: integer
format: int32
format: int
required: true
description: User ID for marking as to be removed.
tags:
@ -491,7 +514,7 @@ paths:
- name: user_id
in: path
type: integer
format: int32
format: int
required: true
description: Registered user ID.
- name: password
@ -508,9 +531,9 @@ paths:
400:
description: Invalid user ID; Old password is blank; New password is blank.
401:
description: Old password is not correct.
description: Not authorized to change the password. Please check the login status.
403:
description: Guests can only change their own account.
description: Old password is not correct.
500:
description: Unexpected internal errors.
/users/{user_id}/sysadmin:
@ -523,7 +546,7 @@ paths:
- name: user_id
in: path
type: integer
format: int32
format: int
required: true
description: Registered user ID
tags:
@ -556,7 +579,6 @@ paths:
- name: q
in: query
type: string
format: string
required: false
description: Repo name for filtering results.
tags:
@ -584,13 +606,11 @@ paths:
- name: repo_name
in: query
type: string
format: string
required: true
description: The name of repository which will be deleted.
- name: tag
in: query
type: string
format: string
required: false
description: Tag of a repository.
tags:
@ -645,6 +665,10 @@ paths:
responses:
200:
description: Retrieved manifests from a relevant repository successfully.
schema:
$ref: '#/definitions/Repository'
404:
description: Manifests of the specified repository not found.
500:
description: Unexpected internal errors.
/repositories/top:
@ -711,6 +735,437 @@ paths:
description: User needs to log in first.
500:
description: Unexpected internal errors.
/jobs/replication:
get:
summary: List jobs filtered by policy and repository
description: |
This endpoint lets users list jobs filtered by policy and repository. (If start_time and end_time are both null, jobs of the last 10 days are listed.)
tags:
- Products
parameters:
- name: policy_id
in: query
type: integer
format: int
required: false
description: The ID of the policy that triggered this job.
- name: num
in: query
type: integer
format: int32
required: false
description: The maximum length of the returned job list.
- name: end_time
in: query
type: integer
format: int64
required: false
description: The end time of completed jobs. (Timestamp)
- name: start_time
in: query
type: integer
format: int64
required: false
description: The start time of jobs. (Timestamp)
- name: repository
in: query
type: string
required: false
description: Filter the returned job list by repository name.
- name: status
in: query
type: string
required: false
description: Filter the returned job list by status.
responses:
200:
description: Get the required jobs successfully.
schema:
type: array
items:
$ref: '#/definitions/JobStatus'
400:
description: Bad request because of invalid parameters.
401:
description: User needs to log in first.
500:
description: Unexpected internal errors.
/jobs/replication/{id}:
delete:
summary: Delete the job with the specified ID.
description: |
This endpoint removes the job with the specified ID from the jobservice.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: The ID of the job to delete.
tags:
- Products
responses:
200:
description: Job deleted successfully.
400:
description: Job ID is invalid or the job cannot be removed.
401:
description: Only admin has this authority.
404:
description: Job ID does not exist.
500:
description: Unexpected internal errors.
/jobs/replication/{id}/log:
get:
summary: Get job logs.
description: |
This endpoint lets users search job logs filtered by a specific ID.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: Relevant job ID.
tags:
- Products
responses:
200:
description: Get job log successfully.
400:
description: Illegal format of provided ID value.
401:
description: User needs to log in first.
404:
description: The log of the specified job ID does not exist.
500:
description: Unexpected internal errors.
/policies/replication:
get:
summary: List policies filtered by name and project_id
description: |
This endpoint lets users list policies filtered by name and project_id; if both name and project_id are nil, all policies are returned.
parameters:
- name: name
in: query
type: string
required: false
description: The replication policy name.
- name: project_id
in: query
type: integer
format: int64
required: false
description: Relevant project ID.
tags:
- Products
responses:
200:
description: Get policies successfully.
schema:
type: array
items:
$ref: '#/definitions/JobStatus'
400:
description: Invalid project ID.
401:
description: User needs to log in first.
500:
description: Unexpected internal errors.
post:
summary: Create a policy
description: |
This endpoint lets users create a policy; if it is enabled, the replication is triggered immediately.
parameters:
- name: policyinfo
in: body
description: Create new policy.
required: true
schema:
$ref: '#/definitions/RepPolicyPost'
tags:
- Products
responses:
201:
description: Create policy successfully.
400:
description: Invalid project ID or target ID.
401:
description: User needs to log in first.
409:
description: Policy name already used or policy already exists with the same project and target.
500:
description: Unexpected internal errors.
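Per the description above, an enabled policy starts replicating immediately on creation. A hedged sketch of the POST from Go (field names follow the `RepPolicyPost` definition later in this spec; host and credentials are assumptions):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Body fields follow the RepPolicyPost definition; the values are illustrative.
	body := strings.NewReader(`{"project_id": 1, "target_id": 1, "name": "sync-to-remote"}`)
	req, err := http.NewRequest(http.MethodPost,
		"http://harbor.example.com/api/policies/replication", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("admin", "Harbor12345")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 201 on success, 409 if the name or project/target pair is taken
}
```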
/policies/replication/{id}:
get:
summary: Get replication policy.
description: |
This endpoint lets users search for a replication policy by a specific ID.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: policy ID
tags:
- Products
responses:
200:
description: Get job policy successfully.
schema:
$ref: '#/definitions/RepPolicy'
401:
description: User needs to log in first.
404:
description: The policy with the specified ID does not exist.
500:
description: Unexpected internal errors.
put:
summary: Modify the name, description, target and enablement of a policy.
description: |
This endpoint lets users update a policy's name, description, target and enablement.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: policy ID
- name: policyupdate
in: body
description: Update policy name, description, target and enablement.
required: true
schema:
$ref: '#/definitions/RepPolicyUpdate'
tags:
- Products
responses:
200:
description: Update job policy content successfully.
400:
description: Policy is enabled or target does not exist.
401:
description: User needs to log in first.
404:
description: The policy with the specified ID does not exist.
409:
description: Policy name already used or policy already exists with the same project and target.
500:
description: Unexpected internal errors.
/policies/replication/{id}/enablement:
put:
summary: Modify the enablement of the policy.
description: |
This endpoint lets users update the policy enablement flag.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: policy ID
- name: enabledflag
in: body
description: The policy enablement flag.
required: true
schema:
$ref: '#/definitions/RepPolicyEnablementReq'
tags:
- Products
responses:
200:
description: Update job policy enablement successfully.
400:
description: Invalid enabled value.
401:
description: User needs to log in first.
404:
description: The policy with the specified ID does not exist.
500:
description: Unexpected internal errors.
/targets:
get:
summary: List targets filtered by name.
description: |
This endpoint lets users list targets filtered by name; if name is nil, all targets are returned.
parameters:
- name: name
in: query
type: string
required: false
description: The replication target name.
tags:
- Products
responses:
200:
description: Get targets successfully.
schema:
type: array
items:
$ref: '#/definitions/RepTarget'
401:
description: User needs to log in first.
500:
description: Unexpected internal errors.
post:
summary: Create a new replication target.
description: |
This endpoint is for users to create a new replication target.
parameters:
- name: reptarget
in: body
description: The new replication target to create.
required: true
schema:
$ref: '#/definitions/RepTargetPost'
tags:
- Products
responses:
201:
description: Replication target created successfully.
400:
description: Unsatisfied with constraints of the target creation.
401:
description: User needs to log in first.
409:
description: Replication target name already exists.
500:
description: Unexpected internal errors.
/targets/ping:
post:
summary: Ping and validate a target.
description: |
This endpoint pings the target to validate whether it is reachable and whether the credential is valid.
parameters:
- name: id
in: query
type: integer
format: int64
required: false
description: The replication policy ID.
tags:
- Products
responses:
200:
description: Ping target successfully.
400:
description: Target ID is invalid, endpoint is required, URL is invalid, or a network issue occurred.
401:
description: User needs to log in first, or wrong username/password for the remote target.
404:
description: Target not found.
500:
description: Unexpected internal errors.
/targets/{id}:
put:
summary: Update a replication target.
description: |
This endpoint updates the specified replication target.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: The replication target ID.
- name: repo_target
in: body
required: true
schema:
$ref: '#/definitions/RepTargetPost'
description: Updates to the replication target.
tags:
- Products
responses:
200:
description: Updated replication's target successfully.
400:
description: The target is associated with policy which is enabled.
401:
description: User needs to log in first.
404:
description: Target ID does not exist.
409:
description: Target name or endpoint is already used.
500:
description: Unexpected internal errors.
get:
summary: Get a replication target.
description: This endpoint gets the specified replication target.
tags:
- Products
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: The replication target ID.
responses:
200:
description: Get replication's target successfully.
schema:
type: array
items:
$ref: '#/definitions/RepTarget'
401:
description: User needs to log in first.
404:
description: Replication target not found.
500:
description: Unexpected internal errors.
delete:
summary: Delete the specified replication target.
description: |
This endpoint deletes the specified replication target.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: The replication target ID.
tags:
- Products
responses:
200:
description: Replication's target deleted successfully.
400:
description: Replication target ID is invalid or the target is used by policies.
401:
description: Only admin has this authority.
404:
description: Replication target does not exist.
500:
description: Unexpected internal errors.
/targets/{id}/policies/:
get:
summary: List the policies relevant to the target.
description: |
This endpoint lists policies filtered by the specified replication target ID.
parameters:
- name: id
in: path
type: integer
format: int64
required: true
description: The replication target ID.
tags:
- Products
responses:
200:
description: Get relevant policies successfully.
schema:
type: array
items:
$ref: '#/definitions/RepPolicy'
401:
description: User needs to log in first.
404:
description: Replication target not found.
500:
description: Unexpected internal errors.
definitions:
Search:
type: object
@ -798,6 +1253,33 @@ definitions:
repo_count:
type: integer
description: The number of the repositories under this project.
Repository:
type: object
properties:
id:
type: string
description: Repository ID
parent:
type: string
description: Parent of the image.
created:
type: string
description: Repository create time.
duration_days:
type: string
description: Duration days of the image.
author:
type: string
description: Author of the image.
architecture:
type: string
description: Architecture of the image.
docker_version:
type: string
description: Docker version of the image.
os:
type: string
description: OS of the image.
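For Go consumers, the new `Repository` definition maps onto a tagged struct; a sketch (the struct and package name are illustrative, not part of this commit):

```go
package models

// Repository mirrors the swagger definition above; every field is a string per the spec.
type Repository struct {
	ID            string `json:"id"`             // Repository ID
	Parent        string `json:"parent"`         // Parent of the image
	Created       string `json:"created"`        // Repository create time
	DurationDays  string `json:"duration_days"`  // Duration days of the image
	Author        string `json:"author"`         // Author of the image
	Architecture  string `json:"architecture"`   // Architecture of the image
	DockerVersion string `json:"docker_version"` // Docker version of the image
	OS            string `json:"os"`             // OS of the image
}
```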
User:
type: object
properties:
@ -924,4 +1406,171 @@ definitions:
type: integer
format: int32
description: The count of the total repositories, only be seen when the user is admin.
JobStatus:
type: object
properties:
id:
type: integer
format: int64
description: The job ID.
status:
type: string
description: The status of the job.
repository:
type: string
description: The repository handled by the job.
policy_id:
type: integer
format: int64
description: The ID of the policy that triggered this job.
operation:
type: string
description: The operation of the job.
tags:
type: array
description: The repository's used tag list.
items:
$ref: '#/definitions/Tags'
creation_time:
type: string
description: The creation time of the job.
update_time:
type: string
description: The update time of the job.
Tags:
type: object
properties:
tag:
type: string
description: The repository's used tag.
RepPolicy:
type: object
properties:
id:
type: integer
format: int64
description: The policy ID.
project_id:
type: integer
format: int64
description: The project ID.
project_name:
type: string
description: The project name.
target_id:
type: integer
format: int64
description: The target ID.
target_name:
type: string
description: The target name.
name:
type: string
description: The policy name.
enabled:
type: integer
format: int
description: The policy's enabled status.
description:
type: string
description: The description of the policy.
cron_str:
type: string
description: The cron string for schedule job.
start_time:
type: string
description: The start time of the policy.
creation_time:
type: string
description: The create time of the policy.
update_time:
type: string
description: The update time of the policy.
error_job_count:
format: int
description: The error job count number for the policy.
RepPolicyPost:
type: object
properties:
project_id:
type: integer
format: int64
description: The project ID.
target_id:
type: integer
format: int64
description: The target ID.
name:
type: string
description: The policy name.
RepPolicyUpdate:
type: object
properties:
target_id:
type: integer
format: int64
description: The target ID.
name:
type: string
description: The policy name.
enabled:
type: integer
format: int
description: The policy's enabled status.
description:
type: string
description: The description of the policy.
cron_str:
type: string
description: The cron string for schedule job.
RepPolicyEnablementReq:
type: object
properties:
enabled:
type: integer
format: int
description: The policy enablement flag.
RepTarget:
type: object
properties:
id:
type: integer
format: int64
description: The target ID.
endpoint:
type: string
description: The target address URL string.
name:
type: string
description: The target name.
username:
type: string
description: The target server username.
password:
type: string
description: The target server password.
type:
type: integer
format: int
description: Reserved field.
creation_time:
type: string
description: The create time of the policy.
update_time:
type: string
description: The update time of the policy.
RepTargetPost:
type: object
properties:
endpoint:
type: string
description: The target address URL string.
name:
type: string
description: The target name.
username:
type: string
description: The target server username.
password:
type: string
description: The target server password.

View File

@ -1 +0,0 @@
logrus

View File

@ -1,7 +0,0 @@
language: go
go:
- 1.3
- 1.4
- tip
install:
- go get -t ./...

View File

@ -1,9 +1,21 @@
# 0.9.0 (Unreleased)
# 0.10.0
* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)
# 0.9.0
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support
# 0.8.7

View File

@ -1,4 +1,4 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
@ -12,7 +12,7 @@ plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
@ -32,7 +32,7 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
@ -204,7 +204,7 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
@ -218,7 +218,17 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
#### Level logging
@ -296,15 +306,12 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY, set the
`DisableColors` field to `true`.
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
```go
logrus.SetFormatter(&logstash.LogstashFormatter{Type: “application_name"})
```
Third party logging formatters:
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@ -353,5 +360,43 @@ Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
#### Tools
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers; you can set a logger's level, hook and formatter via a config file, and the logger is generated with a different config in each environment.|
#### Testing
Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
logger, hook := NewNullLogger()
logger.Error("Hello error")
assert.Equal(1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
hook.Reset()
assert.Nil(hook.LastEntry())
```
#### Fatal handlers
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
```go
...
handler := func() {
// gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```

vendor/github.com/Sirupsen/logrus/alt_exit.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package logrus
// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
"fmt"
"os"
)
var handlers = []func(){}
func runHandler(handler func()) {
defer func() {
if err := recover(); err != nil {
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
}
}()
handler()
}
func runHandlers() {
for _, handler := range handlers {
runHandler(handler)
}
}
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
runHandlers()
os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}
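A small usage sketch for the new exit-handler API (standalone program; the handler body is illustrative):

```go
package main

import log "github.com/Sirupsen/logrus"

func main() {
	log.RegisterExitHandler(func() {
		// Runs before os.Exit(1), even when the exit is triggered by log.Fatal.
		log.Info("closing database connections")
	})
	log.Fatal("boom") // all registered handlers fire, then the process exits with code 1
}
```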

View File

@ -68,7 +68,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry {
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{}
data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
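The change above sizes the merged map up front, so `WithFields` performs one allocation of the right size instead of growing the map entry by entry. A micro-benchmark sketch in the style of the repository's formatter benchmarks (the benchmark itself is not part of this commit):

```go
package logrus

import "testing"

func BenchmarkEntryWithFields(b *testing.B) {
	base := NewEntry(New()).WithFields(Fields{"a": 1, "b": 2})
	extra := Fields{"c": 3, "d": 4}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// With the preallocation, the merged map is created at its final size.
		base.WithFields(extra)
	}
}
```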
@ -150,7 +150,7 @@ func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
os.Exit(1)
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
@ -198,7 +198,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
os.Exit(1)
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
@ -245,7 +245,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
os.Exit(1)
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {

View File

@ -1,77 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestEntryWithError(t *testing.T) {
assert := assert.New(t)
defer func() {
ErrorKey = "error"
}()
err := fmt.Errorf("kaboom at layer %d", 4711)
assert.Equal(err, WithError(err).Data["error"])
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
assert.Equal(err, entry.WithError(err).Data["error"])
ErrorKey = "err"
assert.Equal(err, entry.WithError(err).Data["err"])
}
func TestEntryPanicln(t *testing.T) {
errBoom := fmt.Errorf("boom time")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicln("kaboom")
}
func TestEntryPanicf(t *testing.T) {
errBoom := fmt.Errorf("boom again")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom true", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}

View File

@ -1,50 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.JSONFormatter)
log.Formatter = new(logrus.TextFormatter) // default
log.Level = logrus.DebugLevel
}
func main() {
defer func() {
err := recover()
if err != nil {
log.WithFields(logrus.Fields{
"omg": true,
"err": err,
"number": 100,
}).Fatal("The ice breaks!")
}
}()
log.WithFields(logrus.Fields{
"animal": "walrus",
"number": 8,
}).Debug("Started observing beach")
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"temperature": -4,
}).Debug("Temperature changes")
log.WithFields(logrus.Fields{
"animal": "orca",
"size": 9009,
}).Panic("It's over 9000!")
}

View File

@ -1,30 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.TextFormatter) // default
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
}
func main() {
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}

View File

@ -31,18 +31,15 @@ type Formatter interface {
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
_, ok := data["time"]
if ok {
data["fields.time"] = data["time"]
if t, ok := data["time"]; ok {
data["fields.time"] = t
}
_, ok = data["msg"]
if ok {
data["fields.msg"] = data["msg"]
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
}
_, ok = data["level"]
if ok {
data["fields.level"] = data["level"]
if l, ok := data["level"]; ok {
data["fields.level"] = l
}
}

View File

@ -1,98 +0,0 @@
package logrus
import (
"fmt"
"testing"
"time"
)
// smallFields is a small size data set for benchmarking
var smallFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
// largeFields is a large size data set for benchmarking
var largeFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
"five": "six",
"seven": "eight",
"nine": "ten",
"eleven": "twelve",
"thirteen": "fourteen",
"fifteen": "sixteen",
"seventeen": "eighteen",
"nineteen": "twenty",
"a": "b",
"c": "d",
"e": "f",
"g": "h",
"i": "j",
"k": "l",
"m": "n",
"o": "p",
"q": "r",
"s": "t",
"u": "v",
"w": "x",
"y": "z",
"this": "will",
"make": "thirty",
"entries": "yeah",
}
var errorFields = Fields{
"foo": fmt.Errorf("bar"),
"baz": fmt.Errorf("qux"),
}
func BenchmarkErrorTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
}
func BenchmarkSmallTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkLargeTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
}
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
}
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
}
func BenchmarkSmallJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, smallFields)
}
func BenchmarkLargeJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, largeFields)
}
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
entry := &Entry{
Time: time.Time{},
Level: InfoLevel,
Message: "message",
Data: fields,
}
var d []byte
var err error
for i := 0; i < b.N; i++ {
d, err = formatter.Format(entry)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(d)))
}
}

View File

@ -1,56 +0,0 @@
package logstash
import (
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
)
// Formatter generates json in logstash format.
// Logstash site: http://logstash.net/
type LogstashFormatter struct {
Type string // if not empty use for logstash type field.
// TimestampFormat sets the format used for timestamps.
TimestampFormat string
}
func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
entry.Data["@version"] = 1
if f.TimestampFormat == "" {
f.TimestampFormat = logrus.DefaultTimestampFormat
}
entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
// set message field
v, ok := entry.Data["message"]
if ok {
entry.Data["fields.message"] = v
}
entry.Data["message"] = entry.Message
// set level field
v, ok = entry.Data["level"]
if ok {
entry.Data["fields.level"] = v
}
entry.Data["level"] = entry.Level.String()
// set type field
if f.Type != "" {
v, ok = entry.Data["type"]
if ok {
entry.Data["fields.type"] = v
}
entry.Data["type"] = f.Type
}
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

View File

@ -1,52 +0,0 @@
package logstash
import (
"bytes"
"encoding/json"
"github.com/Sirupsen/logrus"
"github.com/stretchr/testify/assert"
"testing"
)
func TestLogstashFormatter(t *testing.T) {
assert := assert.New(t)
lf := LogstashFormatter{Type: "abc"}
fields := logrus.Fields{
"message": "def",
"level": "ijk",
"type": "lmn",
"one": 1,
"pi": 3.14,
"bool": true,
}
entry := logrus.WithFields(fields)
entry.Message = "msg"
entry.Level = logrus.InfoLevel
b, _ := lf.Format(entry)
var data map[string]interface{}
dec := json.NewDecoder(bytes.NewReader(b))
dec.UseNumber()
dec.Decode(&data)
// base fields
assert.Equal(json.Number("1"), data["@version"])
assert.NotEmpty(data["@timestamp"])
assert.Equal("abc", data["type"])
assert.Equal("msg", data["message"])
assert.Equal("info", data["level"])
// substituted fields
assert.Equal("def", data["fields.message"])
assert.Equal("ijk", data["fields.level"])
assert.Equal("lmn", data["fields.type"])
// formats
assert.Equal(json.Number("1"), data["one"])
assert.Equal(json.Number("3.14"), data["pi"])
assert.Equal(true, data["bool"])
}

View File

@ -1,122 +0,0 @@
package logrus
import (
"testing"
"github.com/stretchr/testify/assert"
)
type TestHook struct {
Fired bool
}
func (hook *TestHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *TestHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookFires(t *testing.T) {
hook := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
assert.Equal(t, hook.Fired, false)
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
type ModifyHook struct {
}
func (hook *ModifyHook) Fire(entry *Entry) error {
entry.Data["wow"] = "whale"
return nil
}
func (hook *ModifyHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookCanModifyEntry(t *testing.T) {
hook := new(ModifyHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
})
}
func TestCanFireMultipleHooks(t *testing.T) {
hook1 := new(ModifyHook)
hook2 := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook1)
log.Hooks.Add(hook2)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
assert.Equal(t, hook2.Fired, true)
})
}
type ErrorHook struct {
Fired bool
}
func (hook *ErrorHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *ErrorHook) Levels() []Level {
return []Level{
ErrorLevel,
}
}
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, false)
})
}
func TestErrorHookShouldFireOnError(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Error("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}

View File

@ -1,68 +0,0 @@
package logrus_bugsnag
import (
"errors"
"github.com/Sirupsen/logrus"
"github.com/bugsnag/bugsnag-go"
)
type bugsnagHook struct{}
// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
// bugsnag.Configure. Bugsnag must be configured before the hook.
var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")
// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
// failed.
type ErrBugsnagSendFailed struct {
err error
}
func (e ErrBugsnagSendFailed) Error() string {
return "failed to send error to Bugsnag: " + e.err.Error()
}
// NewBugsnagHook initializes a logrus hook which sends exceptions to an
// exception-tracking service compatible with the Bugsnag API. Before using
// this hook, you must call bugsnag.Configure(). The returned object should be
// registered with a log via `AddHook()`
//
// Entries that trigger an Error, Fatal or Panic should now include an "error"
// field to send to Bugsnag.
func NewBugsnagHook() (*bugsnagHook, error) {
if bugsnag.Config.APIKey == "" {
return nil, ErrBugsnagUnconfigured
}
return &bugsnagHook{}, nil
}
// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
// "error" field (or the Message if the error isn't present) and sends it off.
func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
var notifyErr error
err, ok := entry.Data["error"].(error)
if ok {
notifyErr = err
} else {
notifyErr = errors.New(entry.Message)
}
bugsnagErr := bugsnag.Notify(notifyErr)
if bugsnagErr != nil {
return ErrBugsnagSendFailed{bugsnagErr}
}
return nil
}
// Levels enumerates the log levels on which the error should be forwarded to
// bugsnag: everything at or above the "Error" level.
func (hook *bugsnagHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}

View File

@ -1,64 +0,0 @@
package logrus_bugsnag
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/Sirupsen/logrus"
"github.com/bugsnag/bugsnag-go"
)
type notice struct {
Events []struct {
Exceptions []struct {
Message string `json:"message"`
} `json:"exceptions"`
} `json:"events"`
}
func TestNoticeReceived(t *testing.T) {
msg := make(chan string, 1)
expectedMsg := "foo"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var notice notice
data, _ := ioutil.ReadAll(r.Body)
if err := json.Unmarshal(data, &notice); err != nil {
t.Error(err)
}
_ = r.Body.Close()
msg <- notice.Events[0].Exceptions[0].Message
}))
defer ts.Close()
hook := &bugsnagHook{}
bugsnag.Configure(bugsnag.Configuration{
Endpoint: ts.URL,
ReleaseStage: "production",
APIKey: "12345678901234567890123456789012",
Synchronous: true,
})
log := logrus.New()
log.Hooks.Add(hook)
log.WithFields(logrus.Fields{
"error": errors.New(expectedMsg),
}).Error("Bugsnag will not see this string")
select {
case received := <-msg:
if received != expectedMsg {
t.Errorf("Unexpected message received: %s", received)
}
case <-time.After(time.Second):
t.Error("Timed out; no notice received by Bugsnag API")
}
}

View File

@ -1,39 +0,0 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
## Usage
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```
If you want to connect to the local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"), just assign an empty string to the first two parameters of `NewSyslogHook`. It should look like the following.
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```

View File

@ -1,59 +0,0 @@
package logrus_syslog
import (
"fmt"
"github.com/Sirupsen/logrus"
"log/syslog"
"os"
)
// SyslogHook to send logs via syslog.
type SyslogHook struct {
Writer *syslog.Writer
SyslogNetwork string
SyslogRaddr string
}
// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
w, err := syslog.Dial(network, raddr, priority, tag)
return &SyslogHook{w, network, raddr}, err
}
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return hook.Writer.Crit(line)
case logrus.FatalLevel:
return hook.Writer.Crit(line)
case logrus.ErrorLevel:
return hook.Writer.Err(line)
case logrus.WarnLevel:
return hook.Writer.Warning(line)
case logrus.InfoLevel:
return hook.Writer.Info(line)
case logrus.DebugLevel:
return hook.Writer.Debug(line)
default:
return nil
}
}
func (hook *SyslogHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}

View File

@ -1,26 +0,0 @@
package logrus_syslog
import (
"github.com/Sirupsen/logrus"
"log/syslog"
"testing"
)
func TestLocalhostAddAndPrint(t *testing.T) {
log := logrus.New()
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
t.Errorf("Unable to connect to local syslog.")
}
log.Hooks.Add(hook)
for _, level := range hook.Levels() {
if len(log.Hooks[level]) != 1 {
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
}
}
log.Info("Congratulations!")
}

View File

@ -1,120 +0,0 @@
package logrus
import (
"encoding/json"
"errors"
"testing"
)
func TestErrorNotLost(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["error"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["omg"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestFieldClashWithTime(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("time", "right now!"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.time"] != "right now!" {
t.Fatal("fields.time not set to original time field")
}
if entry["time"] != "0001-01-01T00:00:00Z" {
t.Fatal("time field not set to current time, was: ", entry["time"])
}
}
func TestFieldClashWithMsg(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("msg", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.msg"] != "something" {
t.Fatal("fields.msg not set to original msg field")
}
}
func TestFieldClashWithLevel(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.level"] != "something" {
t.Fatal("fields.level not set to original level field")
}
}
func TestJSONEntryEndsWithNewline(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
if b[len(b)-1] != '\n' {
t.Fatal("Expected JSON log entry to end with a newline")
}
}

View File

@ -51,7 +51,7 @@ func New() *Logger {
}
}
// Adds a field to the log entry, note that you it doesn't log until you call
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
@ -64,6 +64,12 @@ func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
// Add an error as a single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
return NewEntry(logger).WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
@ -102,7 +108,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
}
os.Exit(1)
Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
@ -149,7 +155,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
}
os.Exit(1)
Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
@ -196,7 +202,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
}
os.Exit(1)
Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {

View File

@ -3,6 +3,7 @@ package logrus
import (
"fmt"
"log"
"strings"
)
// Fields type, used to pass to `WithFields`.
@ -33,7 +34,7 @@ func (level Level) String() string {
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch lvl {
switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) {
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// AllLevels is a slice exposing all logging levels
var AllLevels = []Level{
PanicLevel,
FatalLevel,
ErrorLevel,
WarnLevel,
InfoLevel,
DebugLevel,
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
@ -96,3 +107,37 @@ type StdLogger interface {
Panicf(string, ...interface{})
Panicln(...interface{})
}
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}
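A short sketch of what the two additions in this file enable: `FieldLogger` lets functions accept either a `*Logger` or an `*Entry`, and `ParseLevel` now accepts any casing (the program below is illustrative):

```go
package main

import "github.com/Sirupsen/logrus"

// doWork accepts anything that satisfies FieldLogger: a *Logger or an *Entry.
func doWork(log logrus.FieldLogger) {
	log.WithField("step", 1).Info("working")
}

func main() {
	logger := logrus.New()
	doWork(logger)                                  // *Logger satisfies FieldLogger
	doWork(logger.WithField("component", "worker")) // and so does *Entry

	lvl, err := logrus.ParseLevel("WARN") // case-insensitive as of this change
	if err != nil {
		panic(err)
	}
	logger.Level = lvl
	logger.Debug("suppressed at warn level")
}
```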

View File

@ -1,301 +0,0 @@
package logrus
import (
"bytes"
"encoding/json"
"strconv"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
log(logger)
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assertions(fields)
}
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
var buffer bytes.Buffer
logger := New()
logger.Out = &buffer
logger.Formatter = &TextFormatter{
DisableColors: true,
}
log(logger)
fields := make(map[string]string)
for _, kv := range strings.Split(buffer.String(), " ") {
if !strings.Contains(kv, "=") {
continue
}
kvArr := strings.Split(kv, "=")
key := strings.TrimSpace(kvArr[0])
val := kvArr[1]
if kvArr[1][0] == '"' {
var err error
val, err = strconv.Unquote(val)
assert.NoError(t, err)
}
fields[key] = val
}
assertions(fields)
}
func TestPrint(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestInfo(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestWarn(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Warn("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "warning")
})
}
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test test")
})
}
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test 10")
})
}
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test10")
})
}
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "testtest")
})
}
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
localLog := logger.WithFields(Fields{
"key1": "value1",
})
localLog.WithField("key2", "value2").Info("test")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assert.Equal(t, "value2", fields["key2"])
assert.Equal(t, "value1", fields["key1"])
buffer = bytes.Buffer{}
fields = Fields{}
localLog.Info("test")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
_, ok := fields["key2"]
assert.Equal(t, false, ok)
assert.Equal(t, "value1", fields["key1"])
}
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
})
}
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["fields.msg"], "hello")
})
}
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("time", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["fields.time"], "hello")
})
}
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("level", 1).Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["level"], "info")
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
})
}
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
LogAndAssertText(t, func(log *Logger) {
ll := log.WithField("herp", "derp")
ll.Info("hello")
ll.Info("bye")
}, func(fields map[string]string) {
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
if _, ok := fields[fieldName]; ok {
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
}
}
})
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
llog := logger.WithField("context", "eating raw fish")
llog.Info("looks delicious")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded first message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "looks delicious")
assert.Equal(t, fields["context"], "eating raw fish")
buffer.Reset()
llog.Warn("omg it is!")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded second message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "omg it is!")
assert.Equal(t, fields["context"], "eating raw fish")
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
}
func TestConvertLevelToString(t *testing.T) {
assert.Equal(t, "debug", DebugLevel.String())
assert.Equal(t, "info", InfoLevel.String())
assert.Equal(t, "warning", WarnLevel.String())
assert.Equal(t, "error", ErrorLevel.String())
assert.Equal(t, "fatal", FatalLevel.String())
assert.Equal(t, "panic", PanicLevel.String())
}
func TestParseLevel(t *testing.T) {
l, err := ParseLevel("panic")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("fatal")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("error")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("warn")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("warning")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("info")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("debug")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("invalid")
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}
func TestGetSetLevelRace(t *testing.T) {
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
if i%2 == 0 {
SetLevel(InfoLevel)
} else {
GetLevel()
}
}(i)
}
wg.Wait()
}

View File

@ -12,9 +12,9 @@ import (
"unsafe"
)
// IsTerminal returns true if the given file descriptor is a terminal.
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
fd := syscall.Stderr
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0

vendor/github.com/Sirupsen/logrus/terminal_solaris.go (new file)
View File

@ -0,0 +1,15 @@
// +build solaris
package logrus
import (
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if stdout's file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
}

View File

@ -18,9 +18,9 @@ var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if the given file descriptor is a terminal.
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0

View File

@ -128,10 +128,10 @@ func needsQuoting(text string) bool {
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return false
return true
}
}
return true
return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
@ -141,14 +141,14 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
switch value := value.(type) {
case string:
if needsQuoting(value) {
if !needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
}
case error:
errmsg := value.Error()
if needsQuoting(errmsg) {
if !needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)

View File

@ -1,61 +0,0 @@
package logrus
import (
"bytes"
"errors"
"testing"
"time"
)
func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}
checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte{'"'})
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
} else {
t.Errorf("quoting not expected for: %#v", value)
}
}
}
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
checkQuoting(true, "/foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
}
func TestTimestampFormat(t *testing.T) {
checkTimeStr := func(format string) {
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
timeStr := customStr[timeStart+5 : timeEnd-1]
if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
timeStr = timeStr[1 : len(timeStr)-1]
}
if format == "" {
format = time.RFC3339
}
_, e := time.Parse(format, (string)(timeStr))
if e != nil {
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
}
}
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
checkTimeStr("Mon Jan _2 15:04:05 2006")
checkTimeStr("")
}
// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.

View File

@ -7,18 +7,40 @@ import (
)
func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
case InfoLevel:
printFunc = logger.Info
case WarnLevel:
printFunc = logger.Warn
case ErrorLevel:
printFunc = logger.Error
case FatalLevel:
printFunc = logger.Fatal
case PanicLevel:
printFunc = logger.Panic
default:
printFunc = logger.Print
}
go logger.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader) {
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
logger.Print(scanner.Text())
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
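One intended use of the level-aware writer, sketched here with assumed names, is routing the standard library's logger through logrus at a chosen level:

    logger := logrus.New()
    w := logger.WriterLevel(logrus.WarnLevel)
    defer w.Close()
    // stdlib "log" output now surfaces as logrus warnings.
    log.SetOutput(w)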

View File

@ -1,3 +0,0 @@
.DS_Store
*.iml
.idea

View File

@ -51,6 +51,11 @@ Please see [conf.ini](testdata/conf.ini) as an example.
- Finally, `SaveConfigFile` saves your configuration to the local file system.
- Use the method `Reload` in case someone else modified your file(s).
- Methods whose names contain `Comment` help you manipulate comments.
- `LoadFromReader` loads configuration from a reader, without an intermediate file.
- `SaveConfigData` writes the configuration to an arbitrary writer.
- `ReloadData` lets you reload configuration from memory.
Note that you cannot mix in-memory configuration with on-disk configuration.
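A small end-to-end sketch of the in-memory API described above (the key/value content is made up):

    package main

    import (
        "bytes"
        "log"
        "strings"

        "github.com/Unknwon/goconfig"
    )

    func main() {
        cfg, err := goconfig.LoadFromReader(strings.NewReader("key = value"))
        if err != nil {
            log.Fatal(err)
        }
        var buf bytes.Buffer
        if err := goconfig.SaveConfigData(cfg, &buf); err != nil {
            log.Fatal(err)
        }
        // Reload from memory; Reload and AppendFiles are rejected for in-memory configs.
        if err := cfg.ReloadData(bytes.NewReader(buf.Bytes())); err != nil {
            log.Fatal(err)
        }
    }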
## More Information

View File

@ -1,363 +0,0 @@
// Copyright 2013 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package goconfig
import (
"fmt"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
func TestLoadConfigFile(t *testing.T) {
Convey("Load a single configuration file that does exist", t, func() {
c, err := LoadConfigFile("testdata/conf.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("Test GetSectionList", func() {
So(c.GetSectionList(), ShouldResemble,
[]string{"DEFAULT", "Demo", "What's this?", "url", "parent",
"parent.child", "parent.child.child", "auto increment"})
})
Convey("Get value that does exist", func() {
v, err := c.GetValue("Demo", "key2")
So(err, ShouldBeNil)
So(v, ShouldEqual, "test data")
})
Convey("Get value that does not exist", func() {
_, err := c.GetValue("Demo", "key4")
So(err, ShouldNotBeNil)
})
Convey("Get value that has empty value", func() {
_, err := c.GetValue("What's this?", "empty_value")
So(err, ShouldBeNil)
})
Convey("Get value that section does not exist", func() {
_, err := c.GetValue("Demo404", "key4")
So(err, ShouldNotBeNil)
})
Convey("Get value use parent-child feature", func() {
v, err := c.GetValue("parent.child", "sex")
So(err, ShouldBeNil)
So(v, ShouldEqual, "male")
})
Convey("Get value use recursive feature", func() {
v, err := c.GetValue("", "search")
So(err, ShouldBeNil)
So(v, ShouldEqual, "http://www.google.com")
v, err = c.GetValue("url", "google_url")
So(err, ShouldBeNil)
So(v, ShouldEqual, "http://www.google.fake")
})
Convey("Set value that does exist", func() {
So(c.SetValue("Demo", "key2", "hello man!"), ShouldBeFalse)
v, err := c.GetValue("Demo", "key2")
So(err, ShouldBeNil)
So(v, ShouldEqual, "hello man!")
})
Convey("Set value that does not exist", func() {
So(c.SetValue("Demo", "key4", "hello girl!"), ShouldBeTrue)
v, err := c.GetValue("Demo", "key4")
So(err, ShouldBeNil)
So(v, ShouldEqual, "hello girl!")
So(c.SetValue("", "gowalker", "https://gowalker.org"), ShouldBeTrue)
})
Convey("Test GetKeyList", func() {
So(c.GetKeyList("Demo"), ShouldResemble,
[]string{"key1", "key2", "key3", "quote", "key:1",
"key:2=key:1", "中国", "chinese-var", "array_key"})
})
Convey("Delete a key", func() {
So(c.DeleteKey("", "key404"), ShouldBeFalse)
So(c.DeleteKey("Demo", "key404"), ShouldBeFalse)
So(c.DeleteKey("Demo", "中国"), ShouldBeTrue)
_, err := c.GetValue("Demo", "中国")
So(err, ShouldNotBeNil)
So(c.DeleteKey("404", "key"), ShouldBeFalse)
})
Convey("Delete all the keys", func() {
for _, key := range c.GetKeyList("Demo") {
So(c.DeleteKey("Demo", key), ShouldBeTrue)
}
So(c.GetKeyList("Demo"), ShouldResemble, []string{})
So(len(c.GetKeyList("Demo")), ShouldEqual, 0)
})
Convey("Delete section that does not exist", func() {
So(c.DeleteSection(""), ShouldBeTrue)
So(c.DeleteSection("404"), ShouldBeFalse)
})
Convey("Get section that exists", func() {
_, err = c.GetSection("")
So(err, ShouldBeNil)
})
Convey("Get section that does not exist", func() {
_, err = c.GetSection("404")
So(err, ShouldNotBeNil)
})
Convey("Set section comments", func() {
So(c.SetSectionComments("", "default section comments"), ShouldBeTrue)
})
Convey("Get section comments", func() {
So(c.GetSectionComments(""), ShouldEqual, "")
})
Convey("Set key comments", func() {
So(c.SetKeyComments("", "search", "search comments"), ShouldBeTrue)
So(c.SetKeyComments("404", "search", ""), ShouldBeTrue)
})
Convey("Get key comments", func() {
So(c.GetKeyComments("", "google"), ShouldEqual, "; Google")
})
Convey("Delete all the sections", func() {
for _, sec := range c.GetSectionList() {
So(c.DeleteSection(sec), ShouldBeTrue)
}
So(c.GetSectionList(), ShouldResemble, []string{})
So(len(c.GetSectionList()), ShouldEqual, 0)
})
})
Convey("Load a single configuration file that does not exist", t, func() {
_, err := LoadConfigFile("testdata/conf404.ini")
So(err, ShouldNotBeNil)
})
Convey("Load multiple configuration files", t, func() {
c, err := LoadConfigFile("testdata/conf.ini", "testdata/conf2.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("Get value that does not exist in 1st file", func() {
v, err := c.GetValue("new section", "key1")
So(err, ShouldBeNil)
So(v, ShouldEqual, "conf.ini does not have this key")
})
Convey("Get value that overwrited in 2nd file", func() {
v, err := c.GetValue("Demo", "key2")
So(err, ShouldBeNil)
So(v, ShouldEqual, "rewrite this key of conf.ini")
})
})
}
func TestGetKeyList(t *testing.T) {
Convey("Get key list", t, func() {
c, err := LoadConfigFile("testdata/conf.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("Get ket list that does exist", func() {
So(c.GetKeyList("Demo"), ShouldResemble,
[]string{"key1", "key2", "key3", "quote", "key:1",
"key:2=key:1", "中国", "chinese-var", "array_key"})
So(c.GetKeyList(""), ShouldResemble, []string{"google", "search"})
})
Convey("Get key list that not exist", func() {
So(c.GetKeyList("404"), ShouldBeNil)
})
})
}
func TestSaveConfigFile(t *testing.T) {
Convey("Save a ConfigFile to file system", t, func() {
c, err := LoadConfigFile("testdata/conf.ini", "testdata/conf2.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
c.SetValue("", "", "empty")
So(SaveConfigFile(c, "testdata/conf_test.ini"), ShouldBeNil)
})
}
func TestReload(t *testing.T) {
Convey("Reload a configuration file", t, func() {
c, err := LoadConfigFile("testdata/conf.ini", "testdata/conf2.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
So(c.Reload(), ShouldBeNil)
})
}
func TestAppendFiles(t *testing.T) {
Convey("Reload a configuration file", t, func() {
c, err := LoadConfigFile("testdata/conf.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
So(c.AppendFiles("testdata/conf2.ini"), ShouldBeNil)
})
}
func TestTypes(t *testing.T) {
Convey("Return with types", t, func() {
c, err := LoadConfigFile("testdata/conf.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("Return bool", func() {
v, err := c.Bool("parent.child", "married")
So(err, ShouldBeNil)
So(v, ShouldBeTrue)
_, err = c.Bool("parent.child", "died")
So(err, ShouldNotBeNil)
})
Convey("Return float64", func() {
v, err := c.Float64("parent", "money")
So(err, ShouldBeNil)
So(v, ShouldEqual, 1.25)
_, err = c.Float64("parent", "balance")
So(err, ShouldNotBeNil)
})
Convey("Return int", func() {
v, err := c.Int("parent", "age")
So(err, ShouldBeNil)
So(v, ShouldEqual, 32)
_, err = c.Int("parent", "children")
So(err, ShouldNotBeNil)
})
Convey("Return int64", func() {
v, err := c.Int64("parent", "age")
So(err, ShouldBeNil)
So(v, ShouldEqual, 32)
_, err = c.Int64("parent", "children")
So(err, ShouldNotBeNil)
})
})
}
func TestMust(t *testing.T) {
Convey("Must return with type", t, func() {
c, err := LoadConfigFile("testdata/conf.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("Return string", func() {
So(c.MustValue("parent.child", "name"), ShouldEqual, "john")
So(c.MustValue("parent.child", "died"), ShouldEqual, "")
So(c.MustValue("parent.child", "died", "no"), ShouldEqual, "no")
})
Convey("Return string and bool", func() {
val, ok := c.MustValueSet("parent.child", "died")
So(val, ShouldEqual, "")
So(ok, ShouldBeFalse)
val, ok = c.MustValueSet("parent.child", "died", "no")
So(val, ShouldEqual, "no")
So(ok, ShouldBeTrue)
})
Convey("Return bool", func() {
So(c.MustBool("parent.child", "married"), ShouldBeTrue)
So(c.MustBool("parent.child", "died"), ShouldBeFalse)
So(c.MustBool("parent.child", "died", true), ShouldBeTrue)
})
Convey("Return float64", func() {
So(c.MustFloat64("parent", "money"), ShouldEqual, 1.25)
So(c.MustFloat64("parent", "balance"), ShouldEqual, 0.0)
So(c.MustFloat64("parent", "balance", 1.25), ShouldEqual, 1.25)
})
Convey("Return int", func() {
So(c.MustInt("parent", "age"), ShouldEqual, 32)
So(c.MustInt("parent", "children"), ShouldEqual, 0)
So(c.MustInt("parent", "children", 3), ShouldEqual, 3)
})
Convey("Return int64", func() {
So(c.MustInt64("parent", "age"), ShouldEqual, 32)
So(c.MustInt64("parent", "children"), ShouldEqual, 0)
So(c.MustInt64("parent", "children", 3), ShouldEqual, 3)
})
})
}
func TestRange(t *testing.T) {
Convey("Must return with range", t, func() {
c, err := LoadConfigFile("testdata/conf.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
So(c.MustValueRange("What's this?", "name", "joe", []string{"hello"}), ShouldEqual, "joe")
So(c.MustValueRange("What's this?", "name404", "joe", []string{"hello"}), ShouldEqual, "joe")
So(c.MustValueRange("What's this?", "name", "joe", []string{"hello", "try one more value ^-^"}),
ShouldEqual, "try one more value ^-^")
})
}
func TestArray(t *testing.T) {
Convey("Must return with string array", t, func() {
c, err := LoadConfigFile("testdata/conf.ini")
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
So(fmt.Sprintf("%s", c.MustValueArray("Demo", "array_key", ",")), ShouldEqual, "[1 2 3 4 5]")
So(fmt.Sprintf("%s", c.MustValueArray("Demo", "array_key404", ",")), ShouldEqual, "[]")
})
}
func TestLoadFromData(t *testing.T) {
Convey("Load config file from data", t, func() {
c, err := LoadFromData([]byte(""))
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
})
}
func Benchmark_GetValue(b *testing.B) {
c, _ := LoadConfigFile("testdata/conf.ini")
c.BlockMode = false
for i := 0; i < b.N; i++ {
c.GetValue("parent", "money")
}
}
func Benchmark_SetValue(b *testing.B) {
c, _ := LoadConfigFile("testdata/conf.ini")
for i := 0; i < b.N; i++ {
c.SetValue("parent", "money", "10")
}
}

View File

@ -176,10 +176,15 @@ func (c *ConfigFile) read(reader io.Reader) (err error) {
// LoadFromData accepts raw data directly from memory
// and returns a new configuration representation.
// Note that the configuration is written to the system
// temporary folder, so your data should not contain
// sensitive information.
func LoadFromData(data []byte) (c *ConfigFile, err error) {
// Save memory data to temporary file to support further operations.
tmpName := path.Join(os.TempDir(), "goconfig", fmt.Sprintf("%d", time.Now().Nanosecond()))
os.MkdirAll(path.Dir(tmpName), os.ModePerm)
if err = os.MkdirAll(path.Dir(tmpName), os.ModePerm); err != nil {
return nil, err
}
if err = ioutil.WriteFile(tmpName, data, 0655); err != nil {
return nil, err
}
@ -189,6 +194,16 @@ func LoadFromData(data []byte) (c *ConfigFile, err error) {
return c, err
}
// LoadFromReader accepts raw data directly from a reader
// and returns a new configuration representation.
// You must use ReloadData to reload.
// You cannot append files to a ConfigFile read this way.
func LoadFromReader(in io.Reader) (c *ConfigFile, err error) {
c = newConfigFile([]string{""})
err = c.read(in)
return c, err
}
func (c *ConfigFile) loadFile(fileName string) (err error) {
f, err := os.Open(fileName)
if err != nil {
@ -224,6 +239,9 @@ func LoadConfigFile(fileName string, moreFiles ...string) (c *ConfigFile, err er
func (c *ConfigFile) Reload() (err error) {
var cfg *ConfigFile
if len(c.fileNames) == 1 {
if c.fileNames[0] == "" {
return fmt.Errorf("file opened from in-memory data, use ReloadData to reload")
}
cfg, err = LoadConfigFile(c.fileNames[0])
} else {
cfg, err = LoadConfigFile(c.fileNames[0], c.fileNames[1:]...)
@ -235,8 +253,25 @@ func (c *ConfigFile) Reload() (err error) {
return err
}
// ReloadData reloads configuration file from memory
func (c *ConfigFile) ReloadData(in io.Reader) (err error) {
var cfg *ConfigFile
if len(c.fileNames) != 1 {
return fmt.Errorf("Multiple files loaded, unable to mix in-memory and file data")
}
cfg, err = LoadFromReader(in)
if err == nil {
*c = *cfg
}
return err
}
// AppendFiles appends more files to ConfigFile and reload automatically.
func (c *ConfigFile) AppendFiles(files ...string) error {
if len(c.fileNames) == 1 && c.fileNames[0] == "" {
return fmt.Errorf("Cannot append file data to in-memory data")
}
c.fileNames = append(c.fileNames, files...)
return c.Reload()
}

View File

@ -1,46 +0,0 @@
; Google
google = www.google.com
search = http://%(google)s
; Here are Comments
; Second line
[Demo]
# This symbol can also make this line to be comments
key1 = Let's us goconfig!!!
key2 = test data
key3 = this is based on key2:%(key2)s
quote = "special case for quote
"key:1" = This is the value of "key:1"
"key:2=key:1" = this is based on "key:2=key:1" => %(key:1)s
中国 = China
chinese-var = hello %(中国)s!
array_key = 1,2,3,4,5
[What's this?]
; Not Enough Comments!!
name = try one more value ^-^
empty_value =
[url]
google_fake = www.google.fake
google_url = http://%(google_fake)s
[parent]
name = john
relation = father
sex = male
age = 32
money = 1.25
[parent.child]
age = 3
married = true
[parent.child.child]
; Auto increment by setting key to "-"
[auto increment]
- = hello
- = go
- = config

View File

@ -1,37 +0,0 @@
; Google
google = www.google.com
search = http://%(google)s
; Here are Comments
; Second line
[Demo]
# This symbol can also make this line to be comments
key1 = Let's us goconfig!!!
key2 = rewrite this key of conf.ini
key3 = this is based on key2:%(key2)s
"key:1" = This is the value of "key:1"
"""key:2"""="""this is based on "key:1" => `%(key:1)s`"""
[What's this?]
; Not Enough Comments!!
name = try one more value ^-^
[parent]
name = john
relation = father
sex = male
age = 32
[parent.child]
age = 3
[parent.child.child]
; Auto increment by setting key to "-"
[auto increment]
- = hello
- = go
- = config
[new section]
key1 = conf.ini does not have this key

View File

@ -1,50 +0,0 @@
; Google
google = www.google.com
search = http://%(google)s
; Here are Comments
; Second line
[Demo]
# This symbol can also make this line to be comments
key1 = Let's us goconfig!!!
key2 = rewrite this key of conf.ini
key3 = this is based on key2:%(key2)s
quote = "special case for quote
`key:1` = This is the value of "key:1"
`key:2=key:1` = this is based on "key:2=key:1" => %(key:1)s
中国 = China
chinese-var = hello %(中国)s!
array_key = 1,2,3,4,5
`key:2` = """this is based on "key:1" => `%(key:1)s`"""
[What's this?]
; Not Enough Comments!!
name = try one more value ^-^
empty_value =
[url]
google_fake = www.google.fake
google_url = http://%(google_fake)s
[parent]
name = john
relation = father
sex = male
age = 32
money = 1.25
[parent.child]
age = 3
married = true
[parent.child.child]
; Auto increment by setting key to "-"
[auto increment]
- = hello
- = go
- = config
[new section]
key1 = conf.ini does not have this key

View File

@ -16,6 +16,7 @@ package goconfig
import (
"bytes"
"io"
"os"
"strings"
)
@ -23,14 +24,8 @@ import (
// Write spaces around "=" to look better.
var PrettyFormat = true
// SaveConfigFile writes configuration file to local file system
func SaveConfigFile(c *ConfigFile, filename string) (err error) {
// Write configuration file by filename.
var f *os.File
if f, err = os.Create(filename); err != nil {
return err
}
// SaveConfigData writes configuration to a writer
func SaveConfigData(c *ConfigFile, out io.Writer) (err error) {
equalSign := "="
if PrettyFormat {
equalSign = " = "
@ -101,7 +96,21 @@ func SaveConfigFile(c *ConfigFile, filename string) (err error) {
}
}
if _, err = buf.WriteTo(f); err != nil {
if _, err := buf.WriteTo(out); err != nil {
return err
}
return nil
}
// SaveConfigFile writes configuration file to local file system
func SaveConfigFile(c *ConfigFile, filename string) (err error) {
// Write configuration file by filename.
var f *os.File
if f, err = os.Create(filename); err != nil {
return err
}
if err := SaveConfigData(c, f); err != nil {
return err
}
return f.Close()

View File

@ -262,7 +262,7 @@ func (c *Controller) Abort(code string) {
// CustomAbort stops the controller handler and shows the error data; it is similar to Abort, but supports a status code and body.
func (c *Controller) CustomAbort(status int, body string) {
c.Ctx.ResponseWriter.WriteHeader(status)
//c.Ctx.Output.Status = status
// c.Ctx.Output.Status = status
// First panic with the body if it is in ErrorMaps, i.e. user-defined error functions.
if _, ok := ErrorMaps[body]; ok {
panic(body)

View File

@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

View File

@ -1,166 +0,0 @@
// Copyright 2013-2014 beego authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// beei18n is a helper tool for beego/i18n package.
package main
import (
"flag"
"fmt"
"html/template"
"io"
"log"
"os"
"strings"
)
type Command struct {
// Run runs the command.
// The args are the arguments after the command name.
Run func(cmd *Command, args []string)
// UsageLine is the one-line usage message.
// The first word in the line is taken to be the command name.
UsageLine string
// Short is the short description shown in the 'go help' output.
Short string
// Long is the long message shown in the 'go help <this-command>' output.
Long string
// Flag is a set of flags specific to this command.
Flag flag.FlagSet
// CustomFlags indicates that the command will do its own
// flag parsing.
CustomFlags bool
}
// Name returns the command's name: the first word in the usage line.
func (c *Command) Name() string {
name := c.UsageLine
i := strings.Index(name, " ")
if i >= 0 {
name = name[:i]
}
return name
}
func (c *Command) Usage() {
fmt.Fprintf(os.Stderr, "usage: %s\n\n", c.UsageLine)
fmt.Fprintf(os.Stderr, "%s\n", strings.TrimSpace(c.Long))
os.Exit(2)
}
// Runnable reports whether the command can be run; otherwise
// it is a documentation pseudo-command such as importpath.
func (c *Command) Runnable() bool {
return c.Run != nil
}
var commands = []*Command{
cmdSync,
}
func main() {
flag.Usage = usage
flag.Parse()
log.SetFlags(0)
args := flag.Args()
if len(args) < 1 {
usage()
}
if args[0] == "help" {
help(args[1:])
return
}
for _, cmd := range commands {
if cmd.Name() == args[0] && cmd.Run != nil {
cmd.Flag.Usage = func() { cmd.Usage() }
if cmd.CustomFlags {
args = args[1:]
} else {
cmd.Flag.Parse(args[1:])
args = cmd.Flag.Args()
}
cmd.Run(cmd, args)
os.Exit(2)
return
}
}
fmt.Fprintf(os.Stderr, "beei18n: unknown subcommand %q\nRun 'bee help' for usage.\n", args[0])
os.Exit(2)
}
var usageTemplate = `beei18n is a helper tool for beego/i18n package.
Usage:
beei18n command [arguments]
The commands are:
{{range .}}{{if .Runnable}}
{{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}}
Use "beei18n help [command]" for more information about a command.
`
var helpTemplate = `{{if .Runnable}}usage: beei18n {{.UsageLine}}
{{end}}{{.Long | trim}}
`
func usage() {
tmpl(os.Stdout, usageTemplate, commands)
os.Exit(2)
}
func tmpl(w io.Writer, text string, data interface{}) {
t := template.New("top")
t.Funcs(template.FuncMap{"trim": strings.TrimSpace})
template.Must(t.Parse(text))
if err := t.Execute(w, data); err != nil {
panic(err)
}
}
func help(args []string) {
if len(args) == 0 {
usage()
// not exit 2: succeeded at 'go help'.
return
}
if len(args) != 1 {
fmt.Fprintf(os.Stdout, "usage: beei18n help command\n\nToo many arguments given.\n")
os.Exit(2) // failed at 'bee help'
}
arg := args[0]
for _, cmd := range commands {
if cmd.Name() == arg {
tmpl(os.Stdout, helpTemplate, cmd)
// not exit 2: succeeded at 'go help cmd'.
return
}
}
fmt.Fprintf(os.Stdout, "Unknown help topic %#q. Run 'beei18n help'.\n", arg)
os.Exit(2) // failed at 'bee help cmd'
}

View File

@ -1,86 +0,0 @@
// Copyright 2013-2014 beego authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package main
import (
"log"
"os"
"github.com/Unknwon/goconfig"
)
var cmdSync = &Command{
UsageLine: "sync [source file] [target files]",
Short: "sync keys for locale files",
Long: `to quickly sync keys for one or more locale files
based on the one you already have
`,
}
func init() {
cmdSync.Run = syncLocales
}
func syncLocales(cmd *Command, args []string) {
switch len(args) {
case 0:
log.Fatalln("No source locale file is specified")
case 1:
log.Fatalln("No target locale file is specified")
}
srcLocale, err := goconfig.LoadConfigFile(args[0])
if err != nil {
log.Fatalln(err)
}
// Load or create target locales.
targets := args[1:]
targetLocales := make([]*goconfig.ConfigFile, len(targets))
for i, target := range targets {
if !isExist(target) {
os.Create(target)
}
targetLocales[i], err = goconfig.LoadConfigFile(target)
if err != nil {
log.Fatalln(err)
}
}
for _, secName := range srcLocale.GetSectionList() {
keyList := srcLocale.GetKeyList(secName)
for _, target := range targetLocales {
for _, k := range keyList {
if _, err = target.GetValue(secName, k); err != nil {
target.SetValue(secName, k, "")
}
}
}
}
for i, target := range targetLocales {
err = goconfig.SaveConfigFile(target, targets[i])
if err != nil {
log.Fatalln(err)
}
}
}
// IsExist returns whether a file or directory exists.
func isExist(path string) bool {
_, err := os.Stat(path)
return err == nil || os.IsExist(err)
}

vendor/github.com/docker/distribution/AUTHORS (new file)
View File

@ -0,0 +1,147 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Schlesinger <aschlesinger@deis.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Enger <adamenger@gmail.com>
Adrian Mouat <adrian.mouat@gmail.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
Alexey Gladkov <gladkov.alexey@gmail.com>
allencloud <allen.sun@daocloud.io>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andrew Hsu <andrewhsu@acm.org>
Andrew Meredith <andymeredith@gmail.com>
Andrew T Nguyen <andrew.nguyen@docker.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Anis Elleuch <vadmeste@gmail.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <runcom@redhat.com>
Arien Holthuizen <aholthuizen@schubergphilis.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Asuka Suzuki <hello@tanksuzuki.com>
Avi Miller <avi.miller@oracle.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
Ben Firshman <ben@firshman.co.uk>
bin liu <liubin0329@gmail.com>
Brian Bland <brian.bland@docker.com>
burnettk <burnettk@gmail.com>
Carson A <ca@carsonoid.net>
Chris Dillon <squarism@gmail.com>
cyli <cyli@twistedmatrix.com>
Daisuke Fujita <dtanshi45@gmail.com>
Daniel Huhn <daniel@danielhuhn.de>
Darren Shepherd <darren@rancher.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Lawrence <david.lawrence@docker.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
Eric Yang <windfarer@gmail.com>
Fabio Huser <fabio@fh1.ch>
farmerworking <farmerworking@gmail.com>
Felix Yan <felixonmars@archlinux.org>
Florentin Raud <florentin.raud@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
gabriell nascimento <gabriell@bluesoft.com.br>
Gleb Schukin <gschukin@ptsecurity.com>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
HuKeping <hukeping@huawei.com>
Ian Babrou <ibobrik@gmail.com>
igayoso <igayoso@gmail.com>
Jack Griffin <jackpg14@gmail.com>
Jason Freidman <jason.freidman@gmail.com>
Jeff Nickoloff <jeff@allingeek.com>
Jessie Frazelle <jessie@docker.com>
jhaohai <jhaohai@foxmail.com>
Jianqing Wang <tsing@jianqing.org>
John Starks <jostarks@microsoft.com>
Jon Johnson <jonjohnson@google.com>
Jon Poler <jonathan.poler@apcera.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jordan Liggitt <jliggitt@redhat.com>
Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Ke Xu <leonhartx.k@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Li Yi <denverdino@gmail.com>
Liu Hua <sdu.liu@huawei.com>
liuchang0812 <liuchang0812@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Michael Prokop <mika@grml.org>
Michal Minar <miminar@redhat.com>
Miquel Sabaté <msabate@suse.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
Nathan Sullivan <nathan@nightsys.net>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nikita Tarasov <nikita@mygento.ru>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Oilbeater <liumengxinfly@gmail.com>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
Omer Cohen <git@omer.io>
Patrick Devine <patrick.devine@docker.com>
Phil Estes <estesp@linux.vnet.ibm.com>
Philip Misiowiec <philip@atlashealth.com>
Richard Scothern <richard.scothern@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Rusty Conover <rusty@luckydinosaur.com>
Sean Boran <Boran@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Serge Dubrouski <sergeyfd@gmail.com>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Spencer Rinehart <anubis@overthemonkey.com>
Stefan Majewsky <stefan.majewsky@sap.com>
Stefan Weil <sw@weilnetz.de>
Stephen J Day <stephen.day@docker.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Ted Reed <ted.reed@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Tony Holdstock-Brown <tony@docker.com>
Trevor Pounds <trevor.pounds@gmail.com>
Troels Thomsen <troels@thomsen.io>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yuzou <zouyu7@huawei.com>
zhouhaibing089 <zhouhaibing089@gmail.com>
姜继忠 <jizhong.jiangjz@alibaba-inc.com>

vendor/github.com/docker/distribution/BUILDING.md (new file)
View File

@ -0,0 +1,119 @@
# Building the registry source
## Use-case
This is useful if you intend to actively work on the registry.
### Alternatives
Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md).
### Gotchas
You are expected to know your way around Go and Git.
If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
## Build the development environment
The first prerequisite of properly building distribution targets is to have a Go
development environment set up. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
environment.
If a Go development environment is set up, one can use `go get` to install the
`registry` command from the current latest:
go get github.com/docker/distribution/cmd/registry
The above will install the source repository into the `GOPATH`.
Now create the directory for the registry data (this might require you to set permissions properly)
mkdir -p /var/lib/registry
... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
The `registry` binary can then be run with the following:
$ $GOPATH/bin/registry --version
$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
> __NOTE:__ While you do not need to use `go get` to checkout the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/docker/distribution`.
The registry can be run with the default config using the following
incantation:
$ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] debug server listening localhost:5001
If it is working, one should see the above log messages.
### Repeatable Builds
For the full development experience, one should `cd` into
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
commands, such as `go test`, should work per package (please see
[Developing](#developing) if they don't work).
A `Makefile` has been provided as a convenience to support repeatable builds.
Please install the following into `GOPATH` for it to work:
go get github.com/tools/godep github.com/golang/lint/golint
**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly.
Once these commands are available in the `GOPATH`, run `make` to get a full
build:
$ make
+ clean
+ fmt
+ vet
+ lint
+ build
github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
github.com/Sirupsen/logrus
github.com/docker/libtrust
...
github.com/yvasiyarov/gorelic
github.com/docker/distribution/registry/handlers
github.com/docker/distribution/cmd/registry
+ test
...
ok github.com/docker/distribution/digest 7.875s
ok github.com/docker/distribution/manifest 0.028s
ok github.com/docker/distribution/notifications 17.322s
? github.com/docker/distribution/registry [no test files]
ok github.com/docker/distribution/registry/api/v2 0.101s
? github.com/docker/distribution/registry/auth [no test files]
ok github.com/docker/distribution/registry/auth/silly 0.011s
...
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
+ binaries
The above provides a repeatable build using the contents of the vendored
Godeps directory. This includes formatting, vetting, linting, building,
testing and generating tagged binaries. We can verify this worked by running
the registry binary generated in the "./bin" directory:
$ ./bin/registry -version
./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
### Optional build tags
Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
the environment variable `DOCKER_BUILDTAGS`.
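For example, to enable the optional OSS and GCS storage drivers (the same tags this repository's Dockerfile sets), one might run:

    $ DOCKER_BUILDTAGS="include_oss include_gcs" make binaries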

vendor/github.com/docker/distribution/CHANGELOG.md (new file)
View File

@ -0,0 +1,35 @@
# Changelog
## 2.5.0 (2016-06-14)
### Storage
- Ensure uploads directory is cleaned after upload is committed
- Add ability to cap concurrent operations in filesystem driver
- S3: Add 'us-gov-west-1' to the valid region list
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
- Add redirect middleware
### Registry
- Add support for blobAccessController middleware
- Add support for layers from foreign sources
- Remove signature store
- Add support for Let's Encrypt
- Correct yaml key names in configuration
### Client
- Add option to get content digest from manifest get
### Spec
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
- Clarify API documentation around catalog fetch behavior
### API
- Support returning HTTP 429 (Too Many Requests)
### Documentation
- Update auth documentation examples to show "expires in" as int
### Docker Image
- Use Alpine Linux as base image

vendor/github.com/docker/distribution/CONTRIBUTING.md (new file)
View File

@ -0,0 +1,140 @@
# Contributing to the registry
## Before reporting an issue...
### If your problem is with...
- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue
Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
### If you...
- need help setting up your registry
- can't figure out something
- are not sure what's going on or what your problem is
Then please do not open an issue here yet - you should first try one of the following support forums:
- irc: #docker-distribution on freenode
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
## Reporting an issue properly
By following these simple rules you will get better and faster feedback on your issue.
- search the bugtracker for an already reported issue
### If you found an issue that describes your problem:
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
### If you have not found an existing issue that describes your problem:
1. create a new issue, with a succinct title that describes your issue:
- bad title: "It doesn't work with my docker"
- good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
- `docker version`
- `docker info`
- `docker exec <registry-container> registry -version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
## Contributing a patch for a known bug, or a small correction
You should follow the basic GitHub workflow:
1. fork
2. commit a change
3. make sure the tests pass
4. PR
Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`
Some simple rules to ensure quick merge:
- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits
## Contributing new features
You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
Then you should submit your implementation, clearly linking to the issue (and possible proposal).
Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
It's mandatory to:
- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code
Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
## Coding Style
Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.
The rules:
1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
[`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
Go](http://golang.org/doc/effective_go.html) and [Go Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
expectations, caveats and anything else that may be important. If a type
gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
`noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
In practice, short methods will have short variable names and globals will
have longer names.
7. No underscores in package names. If you need a compound name, step back,
and re-examine why you need a compound name. If you still think you need a
compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
warrant its own package, it has not been written generally enough to be a
part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
required. No, we don't need another unit testing framework. Assertion
packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.

vendor/github.com/docker/distribution/Dockerfile (new file)
View File

@ -0,0 +1,18 @@
FROM golang:1.6-alpine
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV DOCKER_BUILDTAGS include_oss include_gcs
WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
RUN set -ex \
&& apk add --no-cache make git
RUN make PREFIX=/go clean binaries
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]

vendor/github.com/docker/distribution/LICENSE (new file)
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

58 vendor/github.com/docker/distribution/MAINTAINERS generated vendored Normal file

@ -0,0 +1,58 @@
# Distribution maintainers file
#
# This file describes who runs the docker/distribution project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"aaronlehmann",
"dmcgowan",
"dmp42",
"richardscothern",
"shykes",
"stevvooe",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.aaronlehmann]
Name = "Aaron Lehmann"
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"
[people.dmp42]
Name = "Olivier Gambier"
Email = "olivier@docker.com"
GitHub = "dmp42"
[people.richardscothern]
Name = "Richard Scothern"
Email = "richard.scothern@gmail.com"
GitHub = "richardscothern"
[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
GitHub = "shykes"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"

106 vendor/github.com/docker/distribution/Makefile generated vendored Normal file

@ -0,0 +1,106 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)
# Used to populate version variable in main package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
# Allow turning off function inlining and variable registerization
ifeq (${DISABLE_OPTIMIZATION},true)
GO_GCFLAGS=-gcflags "-N -l"
VERSION:="$(VERSION)-noopt"
endif
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
.PHONY: clean all fmt vet lint build test binaries
.DEFAULT: all
all: fmt vet lint build test binaries
AUTHORS: .mailmap .git/HEAD
git log --format='%aN <%aE>' | sort -fu > $@
# This only needs to be generated by hand when cutting full releases.
version/version.go:
./version/version.sh > $@
# Required for go 1.5 to build
GO15VENDOREXPERIMENT := 1
# Package list
PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
# Resolving binary dependencies for specific targets
GOLINT := $(shell which golint || echo '')
GODEP := $(shell which godep || echo '')
${PREFIX}/bin/registry: $(wildcard **/*.go)
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
${PREFIX}/bin/digest: $(wildcard **/*.go)
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go)
@echo "+ $@"
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
./bin/registry-api-descriptor-template $< > $@
vet:
@echo "+ $@"
@go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS)
fmt:
@echo "+ $@"
@test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
(echo >&2 "+ please format Go code with 'gofmt -s'" && false)
lint:
@echo "+ $@"
$(if $(GOLINT), , \
$(error Please install golint: `go get -u github.com/golang/lint/golint`))
@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"
build:
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
test:
@echo "+ $@"
@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)
test-full:
@echo "+ $@"
@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)
binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
@echo "+ $@"
clean:
@echo "+ $@"
@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"
dep-save:
@echo "+ $@"
$(if $(GODEP), , \
$(error Please install godep: go get github.com/tools/godep))
@$(GODEP) save $(PKGS)
dep-restore:
@echo "+ $@"
$(if $(GODEP), , \
$(error Please install godep: go get github.com/tools/godep))
@$(GODEP) restore -v
dep-validate: dep-restore
@echo "+ $@"
@rm -Rf .vendor.bak
@mv vendor .vendor.bak
@rm -Rf Godeps
@$(GODEP) save ./...
@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
(echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false)
@rm -Rf .vendor.bak

131 vendor/github.com/docker/distribution/README.md generated vendored Normal file

@ -0,0 +1,131 @@
# Distribution
The Docker toolset to pack, ship, store, and deliver content.
This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused around security and performance.
<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
This repository contains the following components:
|**Component** |Description |
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
### How does this integrate with Docker engine?
This project should provide an implementation of the V2 API for use in the [Docker
core project](https://github.com/docker/docker). The API should be embeddable
and simplify the process of securely pulling and pushing content from `docker`
daemons.
### What are the long term goals of the Distribution project?
The _Distribution_ project has the further long term goal of providing a
secure tool chain for distributing content. The specifications, APIs and tools
should be as useful with Docker as they are without.
Our goal is to design a professional-grade and extensible content distribution
system that allows users to:
* Enjoy an efficient, secure and reliable way to store, manage, package and
exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solutions through good specs and solid
extension mechanisms.
## More about Registry 2.0
The new registry implementation provides the following benefits:
- faster push and pull
- new, more efficient implementation
- simplified deployment
- pluggable storage backend
- webhook notifications
For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
### Who needs to deploy a registry?
By default, Docker users pull images from Docker's public registry instance.
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
ability. Users can also push images to a repository on Docker's public registry,
if they have a [Docker Hub](https://hub.docker.com/) account.
For some users and even companies, this default behavior is sufficient. For
others, it is not.
For example, users with their own software products may want to maintain a
registry for private, company images. Also, you may wish to deploy your own
image repository for images used in testing or continuous integration. For these
use cases and others, [deploying your own registry instance](docs/deploying.md)
may be the better choice.
### Migration to Registry 2.0
For those who have previously deployed their own registry based on the Registry
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
data migration is required. A tool to assist with migration efforts has been
created. For more information see
[docker/migrator](https://github.com/docker/migrator).
## Contribute
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project. If you are contributing code, see
the instructions for [building a development environment](docs/recipes/building.md).
## Support
If any issues are encountered while using the _Distribution_ project, several
avenues are available for support:
<table>
<tr>
<th align="left">
IRC
</th>
<td>
#docker-distribution on FreeNode
</td>
</tr>
<tr>
<th align="left">
Issue Tracker
</th>
<td>
github.com/docker/distribution/issues
</td>
</tr>
<tr>
<th align="left">
Google Groups
</th>
<td>
https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
</td>
</tr>
<tr>
<th align="left">
Mailing List
</th>
<td>
docker@dockerproject.org
</td>
</tr>
</table>
## License
This project is distributed under [Apache License, Version 2.0](LICENSE).

267 vendor/github.com/docker/distribution/ROADMAP.md generated vendored Normal file

@ -0,0 +1,267 @@
# Roadmap
The Distribution Project consists of several components, some of which are
still being defined. This document defines the high-level goals of the
project, identifies the current components, and defines the release
relationship to the Docker Platform.
* [Distribution Goals](#distribution-goals)
* [Distribution Components](#distribution-components)
* [Project Planning](#project-planning): release relationship to the Docker Platform.
This road map is a living document, providing an overview of the goals and
considerations made in respect of the future of the project.
## Distribution Goals
- Replace the existing [docker registry](github.com/docker/docker-registry)
implementation as the primary implementation.
- Replace the existing push and pull code in the docker engine with the
distribution package.
- Define a strong data model for distributing docker images
- Provide a flexible distribution tool kit for use in the docker platform
- Unlock new distribution models
## Distribution Components
Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
features and bugfixes for a component will be added to the relevant milestone. If a feature or
bugfix is not part of a milestone, it is currently unscheduled for
implementation.
* [Registry](#registry)
* [Distribution Package](#distribution-package)
***
### Registry
The new Docker registry is the main portion of the distribution repository.
Registry 2.0 is the first release of the next-generation registry. This was
primarily focused on implementing the [new registry
API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
with a focus on security and performance.
Following from the Distribution project goals above, we have a set of goals
for registry v2 that we would like to follow in the design. New features
should be compared against these goals.
#### Data Storage and Distribution First
The registry's first goal is to provide a reliable, consistent storage
location for Docker images. The registry should only provide the minimal
amount of indexing required to fetch image data and no more.
This means we should be selective in new features and API additions, including
those that may require expensive, ever-growing indexes. Requests should be
servable in "constant time".
#### Content Addressability
All data objects used in the registry API should be content addressable.
Content identifiers should be secure and verifiable. This provides a secure,
reliable base from which to build more advanced content distribution systems.
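As a hedged illustration of this property (standard library only; not code from this repository), a content address can be recomputed from the bytes themselves, so any holder of the data can verify it against its name:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// contentAddress derives a verifiable identifier for a blob from its bytes.
func contentAddress(p []byte) string {
	sum := sha256.Sum256(p)
	return fmt.Sprintf("sha256:%x", sum)
}

func main() {
	// Anyone holding the bytes can recompute and check the name.
	fmt.Println(contentAddress([]byte("hello world")))
	// sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
}
```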
#### Content Agnostic
In the past, changes to the image format would require large changes in Docker
and the Registry. By decoupling the distribution and image format, we can
allow the formats to progress without having to coordinate between the two.
This means that we should be focused on decoupling Docker from the registry
just as much as decoupling the registry from Docker. Such an approach will
allow us to unlock new distribution models that haven't been possible before.
We can take this further by saying that the new registry should be content
agnostic. The registry provides a model of names, tags, manifests and content
addresses and that model can be used to work with content.
#### Simplicity
The new registry should be closer to a microservice component than its
predecessor. This means it should have a narrower API and a low number of
service dependencies. It should be easy to deploy.
This means that other solutions should be explored before changing the API or
adding extra dependencies. If functionality is required, can it be added as an
extension or companion service instead?
#### Extensibility
The registry should provide extension points to add functionality, keeping the
core scope narrow while still leaving room for capabilities to grow.
Features like search, indexing, synchronization and registry explorers fall
into this category. No such feature should be added unless we've found it
impossible to do through an extension.
#### Active Feature Discussions
The following are feature discussions that are currently active.
If you don't see your favorite, unimplemented feature, feel free to contact us
via IRC or the mailing list and we can talk about adding it. The goal here is
to make sure that new features go through a rigid design process before
landing in the registry.
##### Proxying to other Registries
A _pull-through caching_ mode exists for the registry, but is restricted from
within the docker client to only mirror the official Docker Hub. This functionality
can be expanded when image provenance has been specified and implemented in the
distribution project.
##### Metadata storage
Metadata for the registry is currently stored with the manifest and layer data on
the storage backend. While this is a big win for simplicity and reliably maintaining
state, it comes with the cost of consistency and high latency. The mutable registry
metadata operations should be abstracted behind an API which will allow ACID compliant
storage systems to handle metadata.
##### Peer to Peer transfer
Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
##### Indexing, Search and Discovery
The original registry provided some implementation of search for use with
private registries. Support has been elided from V2 since we'd like to
decouple search functionality from the registry. This makes the registry
simpler to deploy, especially in use cases where search is not needed, and
lets us decouple the image format from the registry.
There are explorations into using the catalog API and notification system to
build external indexes. The current line of thought is that we will define a
common search API to index and query docker images. Such a system could be run
as a companion to a registry or set of registries to power discovery.
The main issue with search and discovery is that there are so many ways to
accomplish it. There are two aspects to this project. The first is deciding on
how it will be done, including an API definition that can work with changing
data formats. The second is the process of integrating with `docker search`.
We expect that someone will attempt to address the problem with the existing
tools and propose it as a standard search API, or use it to inform a
standardization process. Once this has been explored, we will integrate with
the docker client.
Please see the following for more detail:
- https://github.com/docker/distribution/issues/206
##### Deletes
> __NOTE:__ Deletes are a much asked for feature. Before requesting this
feature or participating in discussion, we ask that you read this section in
full and understand the problems behind deletes.
While, at first glance, implementing deletes seems simple, there are a number
of mitigating factors that make many solutions not ideal or even pathological
in the context of a registry. The following paragraphs discuss the background
and approaches that could be applied to arrive at a solution.
The goal of deletes in any system is to remove unused or unneeded data. Only
data requested for deletion should be removed and no other data. Removing
unintended data is worse than _not_ removing data that was requested for
removal but ideally, both are supported. Generally, according to this rule, we
err on holding data longer than needed, ensuring that it is only removed when
we can be certain that it can be removed. With the current behavior, we opt to
hold onto the data forever, ensuring that data cannot be incorrectly removed.
To understand the problems with implementing deletes, one must understand the
data model. All registry data is stored in a filesystem layout, implemented on
a "storage driver", effectively a _virtual file system_ (VFS). The storage
system must assume that this VFS layer will be eventually consistent and has
poor read-after-write consistency, since this is the lowest common denominator
among the storage drivers. This is mitigated by writing values in
reverse-dependent order, but makes wider transactional operations unsafe.
Layered on the VFS model is a content-addressable _directed, acyclic graph_
(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
Since the same data can be referenced by multiple manifests, we only store
data once, even if it is in different repositories. Thus, we have a set of
blobs, referenced by tags and manifests. If we want to delete a blob we need
to be certain that it is no longer referenced by another manifest or tag. When
we delete a manifest, we also can try to delete the referenced blobs. Deciding
whether or not a blob has an active reference is the crux of the problem.
Conceptually, deleting a manifest and its resources is quite simple. Just find
all the manifests, enumerate the referenced blobs and delete the blobs not in
that set. An astute observer will recognize this as a garbage collection
problem. As with garbage collection in programming languages, this is very
simple when one always has a consistent view. When one adds parallelism and an
inconsistent view of data, it becomes very challenging.
A simple example can demonstrate this. Let's say we are deleting a manifest
_A_ in one process. We scan the manifest and decide that all the blobs are
ready for deletion. Concurrently, we have another process accepting a new
manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
is accepted and all the blobs are considered present, so the operation
proceeds. The original process then deletes the referenced blobs, assuming
they were unreferenced. The manifest _B_, which we thought had all of its data
present, can no longer be served by the registry, since the dependent data has
been deleted.
Deleting data from the registry safely requires some way to coordinate this
operation. The following approaches are being considered:
- _Reference Counting_ - Maintain a count of references to each blob. This is
challenging for a number of reasons: 1. maintaining a consistent consensus
of reference counts across a set of Registries and 2. Building the initial
list of reference counts for an existing registry. These challenges can be
met with a consensus protocol like Paxos or Raft in the first case and a
necessary but simple scan in the second.
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
and find all blob references. Delete all unreferenced blobs. This approach
is very simple but requires disabling writes for a period of time while the
service reads all data. This is slow and expensive but very accurate and
effective.
- _Generational GC_ - Do something similar to above but instead of blocking
writes, writes are sent to another storage backend while reads are broadcast
to the new and old backends. GC is then performed on the read-only portion.
Because writes land in the new backend, the data in the read-only section
can be safely deleted. The main drawbacks of this approach are complexity
and coordination.
- _Centralized Oracle_ - Using a centralized, transactional database, we can
know exactly which data is referenced at any given time. This avoids the
coordination problem by managing this data in a single location. We trade
off metadata scalability for simplicity and performance. This is a very good
option for most registry deployments. This would create a bottleneck for
registry metadata. However, metadata is generally not the main bottleneck
when serving images.
Please let us know if other solutions exist that we have yet to enumerate.
Note that for any approach, implementation is a massive consideration. For
example, a mark-sweep based solution may seem simple, but the amount of work in
coordination may offset the extra work it might take to build a _Centralized
Oracle_. We'll accept proposals for any solution but please coordinate with us
before dropping code.
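For intuition, here is a toy, hedged sketch of the mark-and-sweep idea over an in-memory, single-threaded view (the `Manifest` type and `sweep` function are illustrative, not registry APIs). The hard part in a real registry, running this against an eventually consistent store with concurrent writers, is exactly what this section describes:

```go
package main

import "fmt"

// Manifest is a toy stand-in: a name plus the digests of blobs it references.
type Manifest struct {
	Name  string
	Blobs []string
}

// sweep marks every blob reachable from some manifest, then reports the
// unmarked blobs as garbage. This assumes a consistent snapshot of both sets.
func sweep(manifests []Manifest, allBlobs []string) (garbage []string) {
	marked := make(map[string]bool)
	for _, m := range manifests {
		for _, d := range m.Blobs {
			marked[d] = true
		}
	}
	for _, d := range allBlobs {
		if !marked[d] {
			garbage = append(garbage, d)
		}
	}
	return garbage
}

func main() {
	manifests := []Manifest{{Name: "a", Blobs: []string{"sha256:01", "sha256:02"}}}
	blobs := []string{"sha256:01", "sha256:02", "sha256:ff"}
	fmt.Println(sweep(manifests, blobs)) // [sha256:ff]
}
```

The concurrent-upload hazard described above is what breaks this sketch: a manifest accepted between the mark and sweep phases can reference a blob already queued for deletion.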
At this time, we have traded off simplicity and ease of deployment for disk
space. Simplicity and ease of deployment tend to reduce developer involvement,
which is currently the most expensive resource in software engineering. Taking
on any solution for deletes will greatly affect these factors, trading off
very cheap disk space for a complex deployment and operational story.
Please see the following issues for more detail:
- https://github.com/docker/distribution/issues/422
- https://github.com/docker/distribution/issues/461
- https://github.com/docker/distribution/issues/462
### Distribution Package
At its core, the Distribution Project is a set of Go packages that make up
Distribution Components. At this time, most of these packages make up the
Registry implementation.
The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
For feature additions, please see the Registry section. In the future, we may break out a
separate Roadmap for distribution-specific features that apply to more than
just the registry.
***
### Project Planning
An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.

245 vendor/github.com/docker/distribution/blobs.go generated vendored Normal file

@ -0,0 +1,245 @@
package distribution
import (
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
)
var (
// ErrBlobExists returned when blob already exists
ErrBlobExists = errors.New("blob exists")
// ErrBlobDigestUnsupported when blob digest is an unsupported version.
ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
// ErrBlobUnknown when blob is not found.
ErrBlobUnknown = errors.New("unknown blob")
// ErrBlobUploadUnknown returned when upload is not found.
ErrBlobUploadUnknown = errors.New("blob upload unknown")
// ErrBlobInvalidLength returned when the blob has an expected length on
// commit, meaning mismatched with the descriptor or an invalid value.
ErrBlobInvalidLength = errors.New("blob invalid length")
)
// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
Digest digest.Digest
Reason error
}
func (err ErrBlobInvalidDigest) Error() string {
return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
err.Digest, err.Reason)
}
// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
From reference.Canonical
Descriptor Descriptor
}
func (err ErrBlobMounted) Error() string {
return fmt.Sprintf("blob mounted from: %v to: %v",
err.From, err.Descriptor)
}
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
// MediaType describe the type of the content. All text based formats are
// encoded as utf-8.
MediaType string `json:"mediaType,omitempty"`
// Size in bytes of content.
Size int64 `json:"size,omitempty"`
// Digest uniquely identifies the content. A byte stream can be verified
// against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
}
// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
return d
}
// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}
// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
Delete(ctx context.Context, dgst digest.Digest) error
}
// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}
// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
BlobStatter
// SetDescriptor assigns the descriptor to the digest. The provided digest and
// the digest in the descriptor must map to identical content but they may
// differ on their algorithm. The descriptor must have the canonical
// digest of the content and the digest algorithm must match the
// annotator's canonical algorithm.
//
// Such a facility can be used to map blobs between digest domains, with
// the restriction that the algorithm of the descriptor must match the
// canonical algorithm (ie sha256) of the annotator.
SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
// Clear enables descriptors to be unlinked
Clear(ctx context.Context, dgst digest.Digest) error
}
// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
// Open provides a ReadSeekCloser to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error will be
// returned.
Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}
// BlobServer can serve blobs via http.
type BlobServer interface {
// ServeBlob attempts to serve the blob, identified by dgst, via http. The
// service may decide to redirect the client elsewhere or serve the data
// directly.
//
// This handler only issues successful responses, such as 2xx or 3xx,
// meaning it serves data or issues a redirect. If the blob is not
// available, an error will be returned and the caller may still issue a
// response.
//
// The implementation may serve the same blob from a different digest
// domain. The appropriate headers will be set for the blob, unless they
// have already been set by the caller.
ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}
// BlobIngester ingests blob data.
type BlobIngester interface {
// Put inserts the content p into the blob service, returning a descriptor
// or an error.
Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
}
// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
Apply(interface{}) error
}
// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
io.WriteCloser
io.ReaderFrom
// Size returns the number of bytes written to this blob.
Size() int64
// ID returns the identifier for this writer. The ID can be used with the
// Blob service to later resume the write.
ID() string
// StartedAt returns the time this blob write was started.
StartedAt() time.Time
// Commit completes the blob writer process. The content is verified
// against the provided provisional descriptor, which may result in an
// error. Depending on the implementation, written data may be validated
// against the provisional descriptor fields. If MediaType is not present,
// the implementation may reject the commit or assign "application/octet-
// stream" to the blob. The returned descriptor may have a different
// digest depending on the blob store, referred to as the canonical
// descriptor.
Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
// Cancel ends the blob write without storing any data and frees any
// associated resources. Any data written thus far will be lost. Cancel
// implementations should allow multiple calls even after a commit that
// result in a no-op. This allows use of Cancel in a defer statement,
// increasing the assurance that it is correctly called.
Cancel(ctx context.Context) error
}
// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
BlobStatter
BlobProvider
BlobIngester
}
// BlobStore represents the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
BlobService
BlobServer
BlobDeleter
}
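Not part of the vendored file above: a minimal, hedged sketch of the write path these interfaces imply, assuming a concrete `BlobIngester` is available and that `digest.FromBytes` matches the vendored digest package. `putBlob` is an illustrative helper name, not an API of this repository:

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// putBlob streams p into the blob store and commits it against a
// provisional descriptor carrying the expected size and digest.
func putBlob(store distribution.BlobIngester, p []byte) (distribution.Descriptor, error) {
	ctx := context.Background()

	bw, err := store.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Per the BlobWriter contract above, Cancel after a successful Commit
	// is a no-op, so deferring it covers only the error paths.
	defer bw.Cancel(ctx)

	if _, err := bw.Write(p); err != nil {
		return distribution.Descriptor{}, err
	}
	return bw.Commit(ctx, distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(p)),
		Digest:    digest.FromBytes(p),
	})
}
```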

89 vendor/github.com/docker/distribution/circle.yml generated vendored Normal file

@ -0,0 +1,89 @@
# Pony-up!
machine:
pre:
# Install gvm
- bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
# Install codecov for coverage
- pip install --user codecov
post:
# go
- gvm install go1.6 --prefer-binary --name=stable
environment:
# Convenient shortcuts to "common" locations
CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
# Trick circle brainflat "no absolute path" behavior
BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
DOCKER_BUILDTAGS: "include_oss include_gcs"
# Workaround Circle parsing dumb bugs and/or YAML wonkyness
CIRCLE_PAIN: "mode: set"
hosts:
# Not used yet
fancy: 127.0.0.1
dependencies:
pre:
# Copy the code to the gopath of all go versions
- >
gvm use stable &&
mkdir -p "$(dirname $BASE_STABLE)" &&
cp -R "$CHECKOUT" "$BASE_STABLE"
override:
# Install dependencies for every copied clone/go version
- gvm use stable && go get github.com/tools/godep:
pwd: $BASE_STABLE
post:
# For the stable go version, additionally install linting tools
- >
gvm use stable &&
go get github.com/axw/gocov/gocov github.com/golang/lint/golint
test:
pre:
# Output the go versions we are going to test
# - gvm use old && go version
- gvm use stable && go version
# Ensure validation of dependencies
- gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi:
pwd: $BASE_STABLE
# First thing: build everything. This will catch compile errors, and it's
# also necessary for go vet to work properly (see #807).
- gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"):
pwd: $BASE_STABLE
# FMT
- gvm use stable && make fmt:
pwd: $BASE_STABLE
# VET
- gvm use stable && make vet:
pwd: $BASE_STABLE
# LINT
- gvm use stable && make lint:
pwd: $BASE_STABLE
override:
# Test stable, and report
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
timeout: 600
pwd: $BASE_STABLE
post:
# Report to codecov
- bash <(curl -s https://codecov.io/bash):
pwd: $BASE_STABLE
## Notes
# Disabled the -race detector due to massive memory usage.
# Do we want these as well?
# - go get code.google.com/p/go.tools/cmd/goimports
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
# http://labix.org/gocheck

85 vendor/github.com/docker/distribution/context/context.go generated vendored Normal file

@ -0,0 +1,85 @@
package context
import (
"sync"
"github.com/docker/distribution/uuid"
"golang.org/x/net/context"
)
// Context is a copy of Context from the golang.org/x/net/context package.
type Context interface {
context.Context
}
// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
Context
id string // id of context, logged as "instance.id"
once sync.Once // once protect generation of the id
}
func (ic *instanceContext) Value(key interface{}) interface{} {
if key == "instance.id" {
ic.once.Do(func() {
// We want to lazy initialize the UUID such that we don't
// call a random generator from the package initialization
// code. For various reasons random could not be available
// https://github.com/docker/distribution/issues/782
ic.id = uuid.Generate().String()
})
return ic.id
}
return ic.Context.Value(key)
}
var background = &instanceContext{
Context: context.Background(),
}
// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
return background
}
// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
return context.WithValue(parent, key, val)
}
// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
context.Context
m map[string]interface{}
}
// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
mo := make(map[string]interface{}, len(m)) // make our own copy.
for k, v := range m {
mo[k] = v
}
return stringMapContext{
Context: ctx,
m: mo,
}
}
func (smc stringMapContext) Value(key interface{}) interface{} {
if ks, ok := key.(string); ok {
if v, ok := smc.m[ks]; ok {
return v
}
}
return smc.Context.Value(key)
}
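A hedged usage sketch of the map-backed context above (not from the vendored file; the `dcontext` alias and sample keys are illustrative):

```go
package main

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	// Values placed via WithValues are visible through ordinary Value
	// lookups; unknown keys fall back to the parent context.
	ctx := dcontext.WithValues(dcontext.Background(), map[string]interface{}{
		"vars.name": "library/ubuntu",
	})

	fmt.Println(ctx.Value("vars.name"))   // library/ubuntu
	fmt.Println(ctx.Value("instance.id")) // process-unique id from Background()
}
```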

89 vendor/github.com/docker/distribution/context/doc.go generated vendored Normal file

@ -0,0 +1,89 @@
// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
//
// ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// ctx := context.WithValue(context.Background(), "version", version)
// GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request-scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// ctx = WithRequest(ctx, req)
// GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one
// can analyze call flow for a particular request with a simple grep of the
// logs.
package context
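The logger-stacking pattern documented above, restated as a compilable, hedged sketch (the `dcontext` alias and version string are illustrative):

```go
package main

import (
	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.Background()

	// Store a value, then push a logger that reports it as a field; every
	// later GetLogger call on this context carries the field for free.
	ctx = dcontext.WithValue(ctx, "version", "v2.0.0")
	ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, "version"))

	dcontext.GetLogger(ctx).Infof("this log message has a version field")
}
```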

364 vendor/github.com/docker/distribution/context/http.go generated vendored Normal file

@ -0,0 +1,364 @@
package context
import (
"errors"
"net"
"net/http"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/uuid"
"github.com/gorilla/mux"
)
// Common errors used with this package.
var (
ErrNoRequestContext = errors.New("no http request in context")
ErrNoResponseWriterContext = errors.New("no http response in context")
)
func parseIP(ipStr string) net.IP {
ip := net.ParseIP(ipStr)
if ip == nil {
log.Warnf("invalid remote IP address: %q", ipStr)
}
return ip
}
// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
proxies := strings.Split(prior, ",")
if len(proxies) > 0 {
remoteAddr := strings.Trim(proxies[0], " ")
if parseIP(remoteAddr) != nil {
return remoteAddr
}
}
}
// X-Real-Ip is less supported, but worth checking in the
// absence of X-Forwarded-For
if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
if parseIP(realIP) != nil {
return realIP
}
}
return r.RemoteAddr
}
// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
addr := RemoteAddr(r)
// Try parsing it as "IP:port"
if ip, _, err := net.SplitHostPort(addr); err == nil {
return ip
}
return addr
}
// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx Context, r *http.Request) Context {
if ctx.Value("http.request") != nil {
// NOTE(stevvooe): This needs to be considered a programming error. It
// is unlikely that we'd want to have more than one request in
// context.
panic("only one request per context")
}
return &httpRequestContext{
Context: ctx,
startedAt: time.Now(),
id: uuid.Generate().String(),
r: r,
}
}
// GetRequest returns the http request in the given context. Returns
// ErrNoRequestContext if the context does not have an http request associated
// with it.
func GetRequest(ctx Context) (*http.Request, error) {
if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
return r, nil
}
return nil, ErrNoRequestContext
}
// GetRequestID attempts to resolve the current request id, if possible. An
// empty string is returned if it is not available on the context.
func GetRequestID(ctx Context) string {
return GetStringValue(ctx, "http.request.id")
}
// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
irw := instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
}
if closeNotifier, ok := w.(http.CloseNotifier); ok {
irwCN := &instrumentedResponseWriterCN{
instrumentedResponseWriter: irw,
CloseNotifier: closeNotifier,
}
return irwCN, irwCN
}
return &irw, &irw
}
// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
v := ctx.Value("http.response")
rw, ok := v.(http.ResponseWriter)
if !ok || rw == nil {
return nil, ErrNoResponseWriterContext
}
return rw, nil
}
// getVarsFromRequest lets us change request vars implementation for testing
// and maybe future changes.
var getVarsFromRequest = mux.Vars
// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx Context, r *http.Request) Context {
return &muxVarsContext{
Context: ctx,
vars: getVarsFromRequest(r),
}
}
// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx Context) Logger {
return GetLogger(ctx,
"http.request.id",
"http.request.method",
"http.request.host",
"http.request.uri",
"http.request.referer",
"http.request.useragent",
"http.request.remoteaddr",
"http.request.contenttype")
}
// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx Context) Logger {
l := getLogrusLogger(ctx,
"http.response.written",
"http.response.status",
"http.response.contenttype")
duration := Since(ctx, "http.request.startedat")
if duration > 0 {
l = l.WithField("http.response.duration", duration.String())
}
return l
}
// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
Context
startedAt time.Time
id string
r *http.Request
}
// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "http.request". For other components, access them
// as "http.request.<component>", e.g. "http.request.uri" for r.RequestURI.
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.request" {
return ctx.r
}
if !strings.HasPrefix(keyStr, "http.request.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
switch parts[2] {
case "uri":
return ctx.r.RequestURI
case "remoteaddr":
return RemoteAddr(ctx.r)
case "method":
return ctx.r.Method
case "host":
return ctx.r.Host
case "referer":
referer := ctx.r.Referer()
if referer != "" {
return referer
}
case "useragent":
return ctx.r.UserAgent()
case "id":
return ctx.id
case "startedat":
return ctx.startedAt
case "contenttype":
ct := ctx.r.Header.Get("Content-Type")
if ct != "" {
return ct
}
}
}
fallback:
return ctx.Context.Value(key)
}
type muxVarsContext struct {
Context
vars map[string]string
}
func (ctx *muxVarsContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "vars" {
return ctx.vars
}
if strings.HasPrefix(keyStr, "vars.") {
keyStr = strings.TrimPrefix(keyStr, "vars.")
}
if v, ok := ctx.vars[keyStr]; ok {
return v
}
}
return ctx.Context.Value(key)
}
// instrumentedResponseWriterCN provides response writer information in a
// context. It implements http.CloseNotifier so that users can detect
// early disconnects.
type instrumentedResponseWriterCN struct {
instrumentedResponseWriter
http.CloseNotifier
}
// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
http.ResponseWriter
Context
mu sync.Mutex
status int
written int64
}
func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
n, err = irw.ResponseWriter.Write(p)
irw.mu.Lock()
irw.written += int64(n)
// Guess the likely status if not set.
if irw.status == 0 {
irw.status = http.StatusOK
}
irw.mu.Unlock()
return
}
func (irw *instrumentedResponseWriter) WriteHeader(status int) {
irw.ResponseWriter.WriteHeader(status)
irw.mu.Lock()
irw.status = status
irw.mu.Unlock()
}
func (irw *instrumentedResponseWriter) Flush() {
if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
}
func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.response" {
return irw
}
if !strings.HasPrefix(keyStr, "http.response.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
irw.mu.Lock()
defer irw.mu.Unlock()
switch parts[2] {
case "written":
return irw.written
case "status":
return irw.status
case "contenttype":
contentType := irw.Header().Get("Content-Type")
if contentType != "" {
return contentType
}
}
}
fallback:
return irw.Context.Value(key)
}
func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.response" {
return irw
}
}
return irw.instrumentedResponseWriter.Value(key)
}
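An end-to-end, hedged sketch of the request plumbing above (hypothetical handler and port, not from the vendored file): attach the request and an instrumented response writer to the context, then log with request-scoped fields:

```go
package main

import (
	"net/http"

	dcontext "github.com/docker/distribution/context"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Build a context carrying the request and an instrumented writer.
	ctx := dcontext.WithRequest(dcontext.Background(), r)
	ctx, w = dcontext.WithResponseWriter(ctx, w)

	// GetRequestLogger resolves fields such as http.request.id and
	// http.request.remoteaddr from the context built above.
	dcontext.GetRequestLogger(ctx).Infof("serving %s", dcontext.RemoteAddr(r))
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil) // error ignored for brevity
}
```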

116 vendor/github.com/docker/distribution/context/logger.go generated vendored Normal file

@ -0,0 +1,116 @@
package context
import (
"fmt"
"github.com/Sirupsen/logrus"
"runtime"
)
// Logger provides a leveled-logging interface.
type Logger interface {
// standard logger methods
Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fatalln(args ...interface{})
Panic(args ...interface{})
Panicf(format string, args ...interface{})
Panicln(args ...interface{})
// Leveled methods, from logrus
Debug(args ...interface{})
Debugf(format string, args ...interface{})
Debugln(args ...interface{})
Error(args ...interface{})
Errorf(format string, args ...interface{})
Errorln(args ...interface{})
Info(args ...interface{})
Infof(format string, args ...interface{})
Infoln(args ...interface{})
Warn(args ...interface{})
Warnf(format string, args ...interface{})
Warnln(args ...interface{})
}
// WithLogger creates a new context with provided logger.
func WithLogger(ctx Context, logger Logger) Context {
return WithValue(ctx, "logger", logger)
}
// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}
// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
// must convert from map[interface{}]interface{} to logrus.Fields (map[string]interface{}).
lfields := make(logrus.Fields, len(fields))
for key, value := range fields {
lfields[fmt.Sprint(key)] = value
}
return getLogrusLogger(ctx, keys...).WithFields(lfields)
}
// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it's recommended that a String method be implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...)
}
// getLogrusLogger returns the logrus logger for the context. If one or more keys
// are provided, they will be resolved on the context and included in the
// logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
var logger *logrus.Entry
// Get a logger, if it is present.
loggerInterface := ctx.Value("logger")
if loggerInterface != nil {
if lgr, ok := loggerInterface.(*logrus.Entry); ok {
logger = lgr
}
}
if logger == nil {
fields := logrus.Fields{}
// Fill in the instance id, if we have it.
instanceID := ctx.Value("instance.id")
if instanceID != nil {
fields["instance.id"] = instanceID
}
fields["go.version"] = runtime.Version()
// If no logger is found, just return the standard logger.
logger = logrus.StandardLogger().WithFields(fields)
}
fields := logrus.Fields{}
for _, key := range keys {
v := ctx.Value(key)
if v != nil {
fields[fmt.Sprint(key)] = v
}
}
return logger.WithFields(fields)
}
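
A minimal usage sketch for the logger helpers, assuming the vendored package imports as shown (aliased to avoid clashing with the standard library's context package); the "instance.id" key is the one the package resolves by default, and the value is illustrative.

package main

import (
	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.Background()
	// Any string-keyed context value can be surfaced as a log field by
	// naming its key in the GetLogger call.
	ctx = dcontext.WithValue(ctx, "instance.id", "abc123")
	// The resolved entry also carries a go.version field by default.
	dcontext.GetLogger(ctx, "instance.id").Infof("service started")
}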

104
vendor/github.com/docker/distribution/context/trace.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
package context
import (
"runtime"
"time"
"github.com/docker/distribution/uuid"
)
// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
if ctx == nil {
ctx = Background()
}
pc, file, line, _ := runtime.Caller(1)
f := runtime.FuncForPC(pc)
ctx = &traced{
Context: ctx,
id: uuid.Generate().String(),
start: time.Now(),
parent: GetStringValue(ctx, "trace.id"),
fnname: f.Name(),
file: file,
line: line,
}
return ctx, func(format string, a ...interface{}) {
GetLogger(ctx,
"trace.duration",
"trace.id",
"trace.parent.id",
"trace.func",
"trace.file",
"trace.line").
Debugf(format, a...)
}
}
// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
Context
id string
parent string
start time.Time
fnname string
file string
line int
}
func (ts *traced) Value(key interface{}) interface{} {
switch key {
case "trace.start":
return ts.start
case "trace.duration":
return time.Since(ts.start)
case "trace.id":
return ts.id
case "trace.parent.id":
if ts.parent == "" {
return nil // must return nil to signal no parent.
}
return ts.parent
case "trace.func":
return ts.fnname
case "trace.file":
return ts.file
case "trace.line":
return ts.line
}
return ts.Context.Value(key)
}
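
Beyond the single-span example in the comment above, nested WithTrace calls link spans together. A sketch under the same aliased import; parentOp and childOp are illustrative names.

package main

import dcontext "github.com/docker/distribution/context"

func main() {
	parentOp(dcontext.Background())
}

func parentOp(ctx dcontext.Context) {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("parent op complete") // emitted at Debug level with trace.* fields
	childOp(ctx)
}

func childOp(ctx dcontext.Context) {
	// This span's "trace.parent.id" resolves to the parent's "trace.id",
	// which is how call chains are stitched together in the logs.
	ctx, done := dcontext.WithTrace(ctx)
	defer done("child op complete")
	dcontext.GetLogger(ctx).Debug("working")
}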

24
vendor/github.com/docker/distribution/context/util.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package context
import (
"time"
)
// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx Context, key interface{}) time.Duration {
if startedAt, ok := ctx.Value(key).(time.Time); ok {
return time.Since(startedAt)
}
return 0
}
// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx Context, key interface{}) (value string) {
if valuev, ok := ctx.Value(key).(string); ok {
value = valuev
}
return value
}
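
A brief sketch of both helpers, under the same aliased import; the key name "op.startedat" is arbitrary.

package main

import (
	"fmt"
	"time"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.WithValue(dcontext.Background(), "op.startedat", time.Now())
	time.Sleep(10 * time.Millisecond)
	fmt.Println(dcontext.Since(ctx, "op.startedat"))            // ~10ms
	fmt.Printf("%q\n", dcontext.GetStringValue(ctx, "missing")) // ""
}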

View File

@ -0,0 +1,16 @@
package context
// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx Context, version string) Context {
ctx = WithValue(ctx, "version", version)
// push a new logger onto the stack
return WithLogger(ctx, GetLogger(ctx, "version"))
}
// GetVersion returns the application version from the context. An empty
// string may be returned if the version was not set on the context.
func GetVersion(ctx Context) string {
return GetStringValue(ctx, "version")
}
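
Sketch: stamping a context with a version and reading it back; the version string is illustrative.

package main

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.WithVersion(dcontext.Background(), "v0.4.0")
	fmt.Println(dcontext.GetVersion(ctx)) // v0.4.0
	// Loggers resolved from ctx now carry a "version" field.
	dcontext.GetLogger(ctx).Info("starting")
}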

7
vendor/github.com/docker/distribution/coverpkg.sh generated vendored Executable file
View File

@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Given a subpackage and the containing package, figures out which packages
# need to be passed to `go test -coverpkg`: this includes all of the
# subpackage's dependencies within the containing package, as well as the
# subpackage itself.
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)"
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','

139
vendor/github.com/docker/distribution/digest/digest.go generated vendored Normal file
View File

@ -0,0 +1,139 @@
package digest
import (
"fmt"
"hash"
"io"
"regexp"
"strings"
)
const (
// DigestSha256EmptyTar is the canonical sha256 digest of empty data
DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
// Digest allows simple protection of hex formatted digest strings, prefixed
// by their algorithm. Strings of type Digest have some guarantee of being in
// the correct format and it provides quick access to the components of a
// digest string.
//
// The following is an example of the contents of Digest types:
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// This allows the digest to be abstracted behind this type so that code can
// work purely in those terms.
type Digest string
// NewDigest returns a Digest from alg and a hash.Hash object.
func NewDigest(alg Algorithm, h hash.Hash) Digest {
return NewDigestFromBytes(alg, h.Sum(nil))
}
// NewDigestFromBytes returns a new digest from the byte contents of p.
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
// functions. This is also useful for rebuilding digests from binary
// serializations.
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
return Digest(fmt.Sprintf("%s:%x", alg, p))
}
// NewDigestFromHex returns a Digest from alg and the hex-encoded digest.
func NewDigestFromHex(alg, hex string) Digest {
return Digest(fmt.Sprintf("%s:%s", alg, hex))
}
// DigestRegexp matches valid digest types.
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
var (
// ErrDigestInvalidFormat returned when digest format invalid.
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
// ErrDigestInvalidLength returned when digest has invalid length.
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
// ErrDigestUnsupported returned when the digest algorithm is unsupported.
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
)
// ParseDigest parses s and returns the validated digest object. An error will
// be returned if the format is invalid.
func ParseDigest(s string) (Digest, error) {
d := Digest(s)
return d, d.Validate()
}
// FromReader returns the digest of the underlying content, computed with the
// canonical digest algorithm.
func FromReader(rd io.Reader) (Digest, error) {
return Canonical.FromReader(rd)
}
// FromBytes digests the input and returns a Digest.
func FromBytes(p []byte) Digest {
return Canonical.FromBytes(p)
}
// Validate checks that the contents of d form a valid digest, returning an
// error if not.
func (d Digest) Validate() error {
s := string(d)
if !DigestRegexpAnchored.MatchString(s) {
return ErrDigestInvalidFormat
}
i := strings.Index(s, ":")
if i < 0 {
return ErrDigestInvalidFormat
}
// case: "sha256:" with no hex.
if i+1 == len(s) {
return ErrDigestInvalidFormat
}
switch algorithm := Algorithm(s[:i]); algorithm {
case SHA256, SHA384, SHA512:
if algorithm.Size()*2 != len(s[i+1:]) {
return ErrDigestInvalidLength
}
default:
return ErrDigestUnsupported
}
return nil
}
// Algorithm returns the algorithm portion of the digest. This will panic if
// the underlying digest is not in a valid format.
func (d Digest) Algorithm() Algorithm {
return Algorithm(d[:d.sepIndex()])
}
// Hex returns the hex digest portion of the digest. This will panic if the
// underlying digest is not in a valid format.
func (d Digest) Hex() string {
return string(d[d.sepIndex()+1:])
}
func (d Digest) String() string {
return string(d)
}
func (d Digest) sepIndex() int {
i := strings.Index(string(d), ":")
if i < 0 {
panic("could not find ':' in digest: " + d)
}
return i
}
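
A short sketch of constructing and validating digests with this package; the sample inputs are arbitrary.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	dgst := digest.FromBytes([]byte("hello world"))
	fmt.Println(dgst.Algorithm(), dgst.Hex()) // sha256 b94d27b9...

	// Validation distinguishes malformed strings from wrong-length hex.
	if _, err := digest.ParseDigest("sha256:deadbeef"); err != nil {
		fmt.Println(err) // invalid checksum digest length
	}
}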

View File

@ -0,0 +1,155 @@
package digest
import (
"crypto"
"fmt"
"hash"
"io"
)
// Algorithm identifies an implementation of a digester by an identifier.
// Note that this defines both the hash algorithm used and the string
// encoding.
type Algorithm string
// supported digest types
const (
SHA256 Algorithm = "sha256" // sha256 with hex encoding
SHA384 Algorithm = "sha384" // sha384 with hex encoding
SHA512 Algorithm = "sha512" // sha512 with hex encoding
// Canonical is the primary digest algorithm used with the distribution
// project. Other digests may be used but this one is the primary storage
// digest.
Canonical = SHA256
)
var (
// TODO(stevvooe): Follow the pattern of the standard crypto package for
// registration of digests. Effectively, we are a registerable set and
// common symbol access.
// algorithms maps values to hash.Hash implementations. Other algorithms
// may be available but they cannot be calculated by the digest package.
algorithms = map[Algorithm]crypto.Hash{
SHA256: crypto.SHA256,
SHA384: crypto.SHA384,
SHA512: crypto.SHA512,
}
)
// Available returns true if the digest type is available for use. If this
// returns false, New and Hash will panic.
func (a Algorithm) Available() bool {
h, ok := algorithms[a]
if !ok {
return false
}
// check availability of the hash, as well
return h.Available()
}
func (a Algorithm) String() string {
return string(a)
}
// Size returns the number of bytes returned by the hash.
func (a Algorithm) Size() int {
h, ok := algorithms[a]
if !ok {
return 0
}
return h.Size()
}
// Set is implemented to allow use of Algorithm as a command-line flag.
func (a *Algorithm) Set(value string) error {
if value == "" {
*a = Canonical
} else {
// just do a type conversion, support is queried with Available.
*a = Algorithm(value)
}
return nil
}
// New returns a new digester for the specified algorithm. If the algorithm
// is not available, the call panics, so check Available before calling New.
func (a Algorithm) New() Digester {
return &digester{
alg: a,
hash: a.Hash(),
}
}
// Hash returns a new hash as used by the algorithm. If not available, the
// method will panic. Check Algorithm.Available() before calling.
func (a Algorithm) Hash() hash.Hash {
if !a.Available() {
// NOTE(stevvooe): A missing hash is usually a programming error that
// must be resolved at compile time. We don't import in the digest
// package to allow users to choose their hash implementation (such as
// when using stevvooe/resumable or a hardware accelerated package).
//
// Applications that may want to resolve the hash at runtime should
// call Algorithm.Available before calling Algorithm.Hash().
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
}
return algorithms[a].New()
}
// FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
digester := a.New()
if _, err := io.Copy(digester.Hash(), rd); err != nil {
return "", err
}
return digester.Digest(), nil
}
// FromBytes digests the input and returns a Digest.
func (a Algorithm) FromBytes(p []byte) Digest {
digester := a.New()
if _, err := digester.Hash().Write(p); err != nil {
// Writes to a Hash should never fail. None of the existing
// hash implementations in the stdlib or hashes vendored
// here can return errors from Write. Having a panic in this
// condition instead of having FromBytes return an error value
// avoids unnecessary error handling paths in all callers.
panic("write to hash function returned error: " + err.Error())
}
return digester.Digest()
}
// TODO(stevvooe): Allow resolution of verifiers using the digest type and
// this registration system.
// Digester calculates the digest of written data. Writes should go directly
// to the return value of Hash, while calling Digest will return the current
// value of the digest.
type Digester interface {
Hash() hash.Hash // provides direct access to underlying hash instance.
Digest() Digest
}
// digester provides a simple digester definition that embeds a hasher.
type digester struct {
alg Algorithm
hash hash.Hash
}
func (d *digester) Hash() hash.Hash {
return d.hash
}
func (d *digester) Digest() Digest {
return NewDigest(d.alg, d.hash)
}
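
Streaming use of a Digester, as a sketch: write through Hash(), then read the digest.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	d := digest.Canonical.New() // sha256 digester
	if _, err := io.Copy(d.Hash(), strings.NewReader("some payload")); err != nil {
		panic(err)
	}
	fmt.Println(d.Digest()) // sha256:<hex of "some payload">
}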

42
vendor/github.com/docker/distribution/digest/doc.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// Package digest provides a generalized type to opaquely represent message
// digests and their operations within the registry. The Digest type is
// designed to serve as a flexible identifier in a content-addressable system.
// More importantly, it provides tools and wrappers to work with
// hash.Hash-based digests with little effort.
//
// Basics
//
// The format of a digest is simply a string with two parts, dubbed the
// "algorithm" and the "digest", separated by a colon:
//
// <algorithm>:<digest>
//
// An example of a sha256 digest representation follows:
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// In this case, the string "sha256" is the algorithm and the hex bytes are
// the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
// standard equality operator.
//
// Verification
//
// The main benefit of using the Digest type is simple verification against a
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
// interface, provides a common write sink for digest verification. After
// writing is complete, calling the Verifier.Verified method will indicate
// whether or not the stream of bytes matches the target digest.
//
// Missing Features
//
// In addition to the above, we intend to add the following features to this
// package:
//
// 1. A Digester type that supports write sink digest calculation.
//
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
//
package digest

245
vendor/github.com/docker/distribution/digest/set.go generated vendored Normal file
View File

@ -0,0 +1,245 @@
package digest
import (
"errors"
"sort"
"strings"
"sync"
)
var (
// ErrDigestNotFound is used when a matching digest
// could not be found in a set.
ErrDigestNotFound = errors.New("digest not found")
// ErrDigestAmbiguous is used when multiple digests
// are found in a set. None of the matching digests
// should be considered valid matches.
ErrDigestAmbiguous = errors.New("ambiguous digest string")
)
// Set is used to hold a unique set of digests which
// may be easily referenced by a string representation of the digest as well
// as a short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
mutex sync.RWMutex
entries digestEntries
}
// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
return &Set{
entries: digestEntries{},
}
}
// checkShortMatch checks whether two digests match as either whole
// values or short values. This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
if len(hex) == len(shortHex) {
if hex != shortHex {
return false
}
if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
} else if !strings.HasPrefix(hex, shortHex) {
return false
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
return true
}
// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (Digest, error) {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
if len(dst.entries) == 0 {
return "", ErrDigestNotFound
}
var (
searchFunc func(int) bool
alg Algorithm
hex string
)
dgst, err := ParseDigest(d)
if err == ErrDigestInvalidFormat {
hex = d
searchFunc = func(i int) bool {
return dst.entries[i].val >= d
}
} else {
hex = dgst.Hex()
alg = dgst.Algorithm()
searchFunc = func(i int) bool {
if dst.entries[i].val == hex {
return dst.entries[i].alg >= alg
}
return dst.entries[i].val >= hex
}
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
return "", ErrDigestNotFound
}
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
return dst.entries[idx].digest, nil
}
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
return "", ErrDigestAmbiguous
}
return dst.entries[idx].digest, nil
}
// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) {
dst.entries = append(dst.entries, entry)
return nil
} else if dst.entries[idx].digest == d {
return nil
}
entries := append(dst.entries, nil)
copy(entries[idx+1:], entries[idx:len(entries)-1])
entries[idx] = entry
dst.entries = entries
return nil
}
// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
// Not found if idx is after or value at idx is not digest
if idx == len(dst.entries) || dst.entries[idx].digest != d {
return nil
}
entries := dst.entries
copy(entries[idx:], entries[idx+1:])
entries = entries[:len(entries)-1]
dst.entries = entries
return nil
}
// All returns all the digests in the set
func (dst *Set) All() []Digest {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
retValues := make([]Digest, len(dst.entries))
for i := range dst.entries {
retValues[i] = dst.entries[i].digest
}
return retValues
}
// ShortCodeTable returns a map of Digest to unique short codes. The length
// parameter is the minimum length; a short code may be as long as the entire
// digest value if uniqueness cannot otherwise be achieved. This function will
// attempt to make short codes as short as possible while keeping them unique.
func ShortCodeTable(dst *Set, length int) map[Digest]string {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
m := make(map[Digest]string, len(dst.entries))
l := length
resetIdx := 0
for i := 0; i < len(dst.entries); i++ {
var short string
extended := true
for extended {
extended = false
if len(dst.entries[i].val) <= l {
short = dst.entries[i].digest.String()
} else {
short = dst.entries[i].val[:l]
for j := i + 1; j < len(dst.entries); j++ {
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
if j > resetIdx {
resetIdx = j
}
extended = true
} else {
break
}
}
if extended {
l++
}
}
}
m[dst.entries[i].digest] = short
if i >= resetIdx {
l = length
}
}
return m
}
type digestEntry struct {
alg Algorithm
val string
digest Digest
}
type digestEntries []*digestEntry
func (d digestEntries) Len() int {
return len(d)
}
func (d digestEntries) Less(i, j int) bool {
if d[i].val != d[j].val {
return d[i].val < d[j].val
}
return d[i].alg < d[j].alg
}
func (d digestEntries) Swap(i, j int) {
d[i], d[j] = d[j], d[i]
}
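
Sketch of short-prefix lookups against a Set; the inputs are arbitrary and a 12-character prefix is assumed unambiguous here.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	s := digest.NewSet()
	a := digest.FromBytes([]byte("alpha"))
	b := digest.FromBytes([]byte("beta"))
	for _, d := range []digest.Digest{a, b} {
		if err := s.Add(d); err != nil {
			panic(err)
		}
	}
	// Any unambiguous hex prefix resolves to the full digest.
	got, err := s.Lookup(a.Hex()[:12])
	fmt.Println(got == a, err) // true <nil>
	// Minimum-length unique short codes, e.g. for display.
	fmt.Println(digest.ShortCodeTable(s, 8)[b])
}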

View File

@ -0,0 +1,44 @@
package digest
import (
"hash"
"io"
)
// Verifier presents a general verification interface to be used with message
// digests and other byte stream verifications. Users instantiate a Verifier
// from one of the various methods, write the data under test to it then check
// the result with the Verified method.
type Verifier interface {
io.Writer
// Verified will return true if the content written to Verifier matches
// the digest.
Verified() bool
}
// NewDigestVerifier returns a verifier that compares the written bytes
// against a passed in digest.
func NewDigestVerifier(d Digest) (Verifier, error) {
if err := d.Validate(); err != nil {
return nil, err
}
return hashVerifier{
hash: d.Algorithm().Hash(),
digest: d,
}, nil
}
type hashVerifier struct {
digest Digest
hash hash.Hash
}
func (hv hashVerifier) Write(p []byte) (n int, err error) {
return hv.hash.Write(p)
}
func (hv hashVerifier) Verified() bool {
return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
}
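
Verification sketch: a Verifier matches only while the written bytes equal the digested content.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("manifest bytes")
	v, err := digest.NewDigestVerifier(digest.FromBytes(payload))
	if err != nil {
		panic(err)
	}
	v.Write(payload)
	fmt.Println(v.Verified()) // true
	v.Write([]byte("tamper"))
	fmt.Println(v.Verified()) // false: the stream no longer matches
}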

7
vendor/github.com/docker/distribution/doc.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
// Package distribution will define the interfaces for the components of
// docker distribution. The goal is to allow users to reliably package, ship
// and store content related to docker images.
//
// This is currently a work in progress. More details are available in the
// README.md.
package distribution

115
vendor/github.com/docker/distribution/errors.go generated vendored Normal file
View File

@ -0,0 +1,115 @@
package distribution
import (
"errors"
"fmt"
"strings"
"github.com/docker/distribution/digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")
// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version
var ErrManifestNotModified = errors.New("manifest not modified")
// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed
var ErrUnsupported = errors.New("operation unsupported")
// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
}
func (err ErrTagUnknown) Error() string {
return fmt.Sprintf("unknown tag=%s", err.Tag)
}
// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
Name string
}
func (err ErrRepositoryUnknown) Error() string {
return fmt.Sprintf("unknown repository name=%s", err.Name)
}
// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
Name string
Reason error
}
func (err ErrRepositoryNameInvalid) Error() string {
return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}
// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
Name string
Tag string
}
func (err ErrManifestUnknown) Error() string {
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}
// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
Name string
Revision digest.Digest
}
func (err ErrManifestUnknownRevision) Error() string {
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}
func (ErrManifestUnverified) Error() string {
return fmt.Sprintf("unverified manifest")
}
// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error
func (errs ErrManifestVerification) Error() string {
var parts []string
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
Digest digest.Digest
}
func (err ErrManifestBlobUnknown) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
Name string
Reason error
}
func (err ErrManifestNameInvalid) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
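
Since these are concrete error types, callers branch on them with a type switch; classify below is an illustrative helper, not part of the package.

package main

import (
	"fmt"

	"github.com/docker/distribution"
)

// classify shows how callers can branch on the typed errors above.
func classify(err error) string {
	switch err.(type) {
	case distribution.ErrRepositoryUnknown:
		return "repository missing"
	case distribution.ErrManifestUnknown:
		return "manifest missing"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(distribution.ErrRepositoryUnknown{Name: "library/demo"}))
}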

View File

@ -0,0 +1 @@
package manifest

View File

@ -0,0 +1,283 @@
package schema1
import (
"crypto/sha512"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
)
type diffID digest.Digest
// gzippedEmptyTar is a gzip-compressed version of an empty tar file
// (1024 NULL bytes)
var gzippedEmptyTar = []byte{
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
// gzippedEmptyTar
const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
// configManifestBuilder is a type for constructing manifests from an image
// configuration and generic descriptors.
type configManifestBuilder struct {
// bs is a BlobService used to create empty layer tars in the
// blob store if necessary.
bs distribution.BlobService
// pk is the libtrust private key used to sign the final manifest.
pk libtrust.PrivateKey
// configJSON is configuration supplied when the ManifestBuilder was
// created.
configJSON []byte
// ref contains the name and optional tag provided to NewConfigManifestBuilder.
ref reference.Named
// descriptors is the set of descriptors referencing the layers.
descriptors []distribution.Descriptor
// emptyTarDigest is set to a valid digest if an empty tar has been
// put in the blob store; otherwise it is empty.
emptyTarDigest digest.Digest
}
// NewConfigManifestBuilder is used to build new manifests for the current
// schema version from an image configuration and a set of descriptors.
// It takes a BlobService so that it can add an empty tar to the blob store
// if the resulting manifest needs empty layers.
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
return &configManifestBuilder{
bs: bs,
pk: pk,
configJSON: configJSON,
ref: ref,
}
}
// Build produces a final manifest from the given references
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
type imageRootFS struct {
Type string `json:"type"`
DiffIDs []diffID `json:"diff_ids,omitempty"`
BaseLayer string `json:"base_layer,omitempty"`
}
type imageHistory struct {
Created time.Time `json:"created"`
Author string `json:"author,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
Comment string `json:"comment,omitempty"`
EmptyLayer bool `json:"empty_layer,omitempty"`
}
type imageConfig struct {
RootFS *imageRootFS `json:"rootfs,omitempty"`
History []imageHistory `json:"history,omitempty"`
Architecture string `json:"architecture,omitempty"`
}
var img imageConfig
if err := json.Unmarshal(mb.configJSON, &img); err != nil {
return nil, err
}
if len(img.History) == 0 {
return nil, errors.New("empty history when trying to create schema1 manifest")
}
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
return nil, errors.New("number of descriptors and number of layers in rootfs must match")
}
// Generate IDs for each layer
// For non-top-level layers, create fake V1Compatibility strings that
// fit the format and don't collide with anything else, but don't
// result in runnable images on their own.
type v1Compatibility struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"`
}
fsLayerList := make([]FSLayer, len(img.History))
history := make([]History, len(img.History))
parent := ""
layerCounter := 0
for i, h := range img.History[:len(img.History)-1] {
var blobsum digest.Digest
if h.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
layerCounter++
}
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
if i == 0 && img.RootFS.BaseLayer != "" {
// windows-only baselayer setup
baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
parent = fmt.Sprintf("%x", baseID[:32])
}
v1Compatibility := v1Compatibility{
ID: v1ID,
Parent: parent,
Comment: h.Comment,
Created: h.Created,
Author: h.Author,
}
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
if h.EmptyLayer {
v1Compatibility.ThrowAway = true
}
jsonBytes, err := json.Marshal(&v1Compatibility)
if err != nil {
return nil, err
}
reversedIndex := len(img.History) - i - 1
history[reversedIndex].V1Compatibility = string(jsonBytes)
fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
parent = v1ID
}
latestHistory := img.History[len(img.History)-1]
var blobsum digest.Digest
if latestHistory.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
}
fsLayerList[0] = FSLayer{BlobSum: blobsum}
dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
// Top-level v1compatibility string should be a modified version of the
// image config.
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(transformedConfig)
tag := ""
if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}
mfst := Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: mb.ref.Name(),
Tag: tag,
Architecture: img.Architecture,
FSLayers: fsLayerList,
History: history,
}
return Sign(&mfst, mb.pk)
}
// emptyTar pushes a compressed empty tar to the blob store if one doesn't
// already exist, and returns its blobsum.
func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
if mb.emptyTarDigest != "" {
// Already put an empty tar
return mb.emptyTarDigest, nil
}
descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
switch err {
case nil:
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
case distribution.ErrBlobUnknown:
// nop
default:
return "", err
}
// Add gzipped empty tar to the blob store
descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
if err != nil {
return "", err
}
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
}
// AppendReference adds a reference to the current ManifestBuilder
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
// todo: verification here?
mb.descriptors = append(mb.descriptors, d.Descriptor())
return nil
}
// References returns the current references added to this builder
func (mb *configManifestBuilder) References() []distribution.Descriptor {
return mb.descriptors
}
// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Top-level v1compatibility string should be a modified version of the
// image config.
var configAsMap map[string]*json.RawMessage
if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
return nil, err
}
// Delete fields that didn't exist in old manifest
delete(configAsMap, "rootfs")
delete(configAsMap, "history")
configAsMap["id"] = rawJSON(v1ID)
if parentV1ID != "" {
configAsMap["parent"] = rawJSON(parentV1ID)
}
if throwaway {
configAsMap["throwaway"] = rawJSON(true)
}
return json.Marshal(configAsMap)
}
func rawJSON(value interface{}) *json.RawMessage {
jsonval, err := json.Marshal(value)
if err != nil {
return nil
}
return (*json.RawMessage)(&jsonval)
}

View File

@ -0,0 +1,184 @@
package schema1
import (
"encoding/json"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/libtrust"
)
const (
// MediaTypeManifest specifies the mediaType for the current version. Note
// that for schema version 1, the media type is optionally "application/json".
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// MediaTypeManifestLayer specifies the media type for manifest layers
MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 1,
}
)
func init() {
schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
sm := new(SignedManifest)
err := sm.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
desc := distribution.Descriptor{
Digest: digest.FromBytes(sm.Canonical),
Size: int64(len(sm.Canonical)),
MediaType: MediaTypeSignedManifest,
}
return sm, desc, err
}
err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
err = distribution.RegisterManifestSchema("", schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
err = distribution.RegisterManifestSchema("application/json", schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// FSLayer is a container struct for BlobSums defined in an image manifest
type FSLayer struct {
// BlobSum is the tarsum of the referenced filesystem image layer
BlobSum digest.Digest `json:"blobSum"`
}
// History stores unstructured v1 compatibility information
type History struct {
// V1Compatibility is the raw v1 compatibility information
V1Compatibility string `json:"v1Compatibility"`
}
// Manifest provides the base accessible fields for working with V2 image
// format in the registry.
type Manifest struct {
manifest.Versioned
// Name is the name of the image's repository
Name string `json:"name"`
// Tag is the tag of the image specified by this manifest
Tag string `json:"tag"`
// Architecture is the host architecture on which this image is intended to
// run
Architecture string `json:"architecture"`
// FSLayers is a list of filesystem layer blobSums contained in this image
FSLayers []FSLayer `json:"fsLayers"`
// History is a list of unstructured historical data for v1 compatibility
History []History `json:"history"`
}
// SignedManifest provides an envelope for a signed image manifest, including
// the format sensitive raw bytes.
type SignedManifest struct {
Manifest
// Canonical is the canonical byte representation of the ImageManifest,
// without any attached signatures. The manifest byte
// representation cannot change or it will have to be re-signed.
Canonical []byte `json:"-"`
// all contains the byte representation of the Manifest including signatures
// and is returned by Payload()
all []byte
}
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
sm.all = make([]byte, len(b))
// store manifest and signatures in all
copy(sm.all, b)
jsig, err := libtrust.ParsePrettySignature(b, "signatures")
if err != nil {
return err
}
// Resolve the payload in the manifest.
bytes, err := jsig.Payload()
if err != nil {
return err
}
// sm.Canonical stores the canonical manifest JSON
sm.Canonical = make([]byte, len(bytes))
copy(sm.Canonical, bytes)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
return err
}
sm.Manifest = manifest
return nil
}
// References returns the descriptors of this manifest's references
func (sm SignedManifest) References() []distribution.Descriptor {
dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
for i, fsLayer := range sm.FSLayers {
dependencies[i] = distribution.Descriptor{
MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
Digest: fsLayer.BlobSum,
}
}
return dependencies
}
// MarshalJSON returns the contents of all. If all is empty, marshals the
// inner contents. Applications requiring a marshaled signed manifest should
// use Payload directly, since the content produced by json.Marshal will be
// compacted and will fail signature checks.
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
if len(sm.all) > 0 {
return sm.all, nil
}
// If the raw data is not available, just dump the inner content.
return json.Marshal(&sm.Manifest)
}
// Payload returns the signed content of the signed manifest.
func (sm SignedManifest) Payload() (string, []byte, error) {
return MediaTypeSignedManifest, sm.all, nil
}
// Signatures returns the signatures as provided by
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
// signatures.
func (sm *SignedManifest) Signatures() ([][]byte, error) {
jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
return nil, err
}
// Resolve the payload in the manifest.
return jsig.Signatures()
}

View File

@ -0,0 +1,98 @@
package schema1
import (
"fmt"
"errors"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
)
// referenceManifestBuilder is a type for constructing manifests from schema1
// dependencies.
type referenceManifestBuilder struct {
Manifest
pk libtrust.PrivateKey
}
// NewReferenceManifestBuilder is used to build new manifests for the current
// schema version using schema1 dependencies.
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
tag := ""
if tagged, isTagged := ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}
return &referenceManifestBuilder{
Manifest: Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: ref.Name(),
Tag: tag,
Architecture: architecture,
},
pk: pk,
}
}
func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
m := mb.Manifest
if len(m.FSLayers) == 0 {
return nil, errors.New("cannot build manifest with zero layers or history")
}
m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
m.History = make([]History, len(mb.Manifest.History))
copy(m.FSLayers, mb.Manifest.FSLayers)
copy(m.History, mb.Manifest.History)
return Sign(&m, mb.pk)
}
// AppendReference adds a reference to the current ManifestBuilder
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
r, ok := d.(Reference)
if !ok {
return fmt.Errorf("Unable to add non-reference type to v1 builder")
}
// Entries need to be prepended
mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
return nil
}
// References returns the current references added to this builder
func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
for i := range mb.Manifest.FSLayers {
layerDigest := mb.Manifest.FSLayers[i].BlobSum
history := mb.Manifest.History[i]
ref := Reference{layerDigest, 0, history}
refs[i] = ref.Descriptor()
}
return refs
}
// Reference describes a manifest v2, schema version 1 dependency.
// An FSLayer associated with a history entry.
type Reference struct {
Digest digest.Digest
Size int64 // if we know it, set it for the descriptor.
History History
}
// Descriptor describes a reference
func (r Reference) Descriptor() distribution.Descriptor {
return distribution.Descriptor{
MediaType: MediaTypeManifestLayer,
Digest: r.Digest,
Size: r.Size,
}
}
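
An end-to-end sketch: build a one-layer schema1 manifest with this builder, sign it via Build, and check it with schema1.Verify (defined later in this package). The key, repository name, and layer bytes are throwaway test values.

package main

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/libtrust"
)

func main() {
	pk, err := libtrust.GenerateECP256PrivateKey() // throwaway signing key
	if err != nil {
		panic(err)
	}
	named, err := reference.ParseNamed("example.com/demo")
	if err != nil {
		panic(err)
	}
	mb := schema1.NewReferenceManifestBuilder(pk, named, "amd64")
	// Each reference pairs a layer blobsum with a history entry; entries
	// are prepended, so append base layers first.
	if err := mb.AppendReference(schema1.Reference{
		Digest:  digest.FromBytes([]byte("layer tar bytes")),
		History: schema1.History{V1Compatibility: "{}"},
	}); err != nil {
		panic(err)
	}
	m, err := mb.Build(dcontext.Background())
	if err != nil {
		panic(err)
	}
	sm := m.(*schema1.SignedManifest)
	keys, err := schema1.Verify(sm) // check the JWS we just produced
	if err != nil {
		panic(err)
	}
	fmt.Println("verified with", len(keys), "key(s)")
}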

View File

@ -0,0 +1,68 @@
package schema1
import (
"crypto/x509"
"encoding/json"
"github.com/docker/libtrust"
)
// Sign signs the manifest with the provided private key, returning a
// SignedManifest. This typically won't be used within the registry, except
// for testing.
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.Sign(pk); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
all: pretty,
Canonical: p,
}, nil
}
// SignWithChain signs the manifest with the given private key and x509 chain.
// The public key of the first element in the chain must be the public key
// corresponding with the sign key.
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.SignWithChain(key, chain); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
all: pretty,
Canonical: p,
}, nil
}

View File

@ -0,0 +1,32 @@
package schema1
import (
"crypto/x509"
"github.com/Sirupsen/logrus"
"github.com/docker/libtrust"
)
// Verify verifies the signature of the signed manifest returning the public
// keys used during signing.
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
return nil, err
}
return js.Verify()
}
// VerifyChains verifies the signature of the signed manifest against the
// certificate pool returning the list of verified chains. Signatures without
// an x509 chain are not checked.
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
return nil, err
}
return js.VerifyChains(ca)
}

View File

@ -0,0 +1,80 @@
package schema2
import (
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
)
// builder is a type for constructing manifests.
type builder struct {
// bs is a BlobService used to publish the configuration blob.
bs distribution.BlobService
// configJSON is the raw image configuration supplied at construction;
// Build publishes it as the config blob.
configJSON []byte
// layers is a list of layer descriptors that gets built by successive
// calls to AppendReference.
layers []distribution.Descriptor
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process.
func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
mb := &builder{
bs: bs,
configJSON: make([]byte, len(configJSON)),
}
copy(mb.configJSON, configJSON)
return mb
}
// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: SchemaVersion,
Layers: make([]distribution.Descriptor, len(mb.layers)),
}
copy(m.Layers, mb.layers)
configDigest := digest.FromBytes(mb.configJSON)
var err error
m.Config, err = mb.bs.Stat(ctx, configDigest)
switch err {
case nil:
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = MediaTypeConfig
return FromStruct(m)
case distribution.ErrBlobUnknown:
// nop
default:
return nil, err
}
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = MediaTypeConfig
if err != nil {
return nil, err
}
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *builder) AppendReference(d distribution.Describable) error {
mb.layers = append(mb.layers, d.Descriptor())
return nil
}
// References returns the current references added to this builder.
func (mb *builder) References() []distribution.Descriptor {
return mb.layers
}

View File

@ -0,0 +1,128 @@
package schema2
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
)
const (
// MediaTypeManifest specifies the mediaType for the current version.
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
// MediaTypeConfig specifies the mediaType for the image configuration.
MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
// MediaTypeLayer is the mediaType used for layers referenced by the
// manifest.
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// MediaTypeForeignLayer is the mediaType used for layers that must be
// downloaded from foreign URLs.
MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
)
func init() {
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
m := new(DeserializedManifest)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
}
err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// Manifest defines a schema2 manifest.
type Manifest struct {
manifest.Versioned
// Config references the image configuration as a blob.
Config distribution.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []distribution.Descriptor `json:"layers"`
}
// References returns the descriptors of this manifest's references.
func (m Manifest) References() []distribution.Descriptor {
return m.Layers
}
// Target returns the target of this signed manifest.
func (m Manifest) Target() distribution.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
return err
}
m.Manifest = manifest
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
return m.MediaType, m.canonical, nil
}
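
A sketch of the FromStruct/Payload round trip, dispatched back through the media-type registry populated by this package's init; the config JSON is illustrative.

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema2"
)

func main() {
	configJSON := []byte(`{"architecture":"amd64"}`)
	m := schema2.Manifest{
		Versioned: schema2.SchemaVersion,
		Config: distribution.Descriptor{
			MediaType: schema2.MediaTypeConfig,
			Digest:    digest.FromBytes(configJSON),
			Size:      int64(len(configJSON)),
		},
	}
	dm, err := schema2.FromStruct(m)
	if err != nil {
		panic(err)
	}
	_, payload, _ := dm.Payload()
	// Dispatch by Content-Type, as a registry client would.
	back, desc, err := distribution.UnmarshalManifest(schema2.MediaTypeManifest, payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.Digest, len(back.References()))
}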

View File

@ -0,0 +1,12 @@
package manifest
// Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type Versioned struct {
// SchemaVersion is the image manifest schema that this image follows
SchemaVersion int `json:"schemaVersion"`
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
}

117
vendor/github.com/docker/distribution/manifests.go generated vendored Normal file
View File

@ -0,0 +1,117 @@
package distribution
import (
"fmt"
"mime"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
)
// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
// References returns a list of objects which make up this manifest.
// The references are strictly ordered from base to head. A reference
// is anything which can be represented by a distribution.Descriptor
References() []Descriptor
// Payload provides the serialized format of the manifest, in addition to
// the mediatype.
Payload() (mediatype string, payload []byte, err error)
}
// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
// Build creates the manifest from this builder.
Build(ctx context.Context) (Manifest, error)
// References returns a list of objects which have been added to this
// builder. The dependencies are returned in the order they were added,
// which should be from base to head.
References() []Descriptor
// AppendReference includes the given object in the manifest after any
// existing dependencies. If the add fails, such as when adding an
// unsupported dependency, an error may be returned.
AppendReference(dependency Describable) error
}
// ManifestService describes operations on image manifests.
type ManifestService interface {
// Exists returns true if the manifest exists.
Exists(ctx context.Context, dgst digest.Digest) (bool, error)
// Get retrieves the manifest specified by the given digest
Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
// Put creates or updates the given manifest returning the manifest digest
Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
// Delete removes the manifest specified by the given digest. Deleting
// a manifest that doesn't exist will return ErrManifestUnknown
Delete(ctx context.Context, dgst digest.Digest) error
}
// ManifestEnumerator enables iterating over manifests
type ManifestEnumerator interface {
// Enumerate calls ingester for each manifest.
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
// Describable is an interface for descriptors
type Describable interface {
Descriptor() Descriptor
}
// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
for t := range mappings {
if t != "" {
mediaTypes = append(mediaTypes, t)
}
}
return
}
// UnmarshalFunc implements manifest unmarshalling for a given MediaType
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
var mappings = make(map[string]UnmarshalFunc, 0)
// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of
// the header. Strip semicolons and anything following them.
var mediatype string
if ctHeader != "" {
var err error
mediatype, _, err = mime.ParseMediaType(ctHeader)
if err != nil {
return nil, Descriptor{}, err
}
}
unmarshalFunc, ok := mappings[mediatype]
if !ok {
unmarshalFunc, ok = mappings[""]
if !ok {
return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype)
}
}
return unmarshalFunc(p)
}
// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
// should be called from the specific manifest package that implements the schema.
func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error {
if _, ok := mappings[mediatype]; ok {
return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype)
}
mappings[mediatype] = u
return nil
}
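
A sketch of how RegisterManifestSchema and UnmarshalManifest fit together: a manifest package registers its UnmarshalFunc in init, and callers resolve payloads by Content-Type. The package, media type, and manifestImpl below are assumptions modeled on the schema packages, not the vendored code itself.

package mymanifest // hypothetical manifest package

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

const mediaType = "application/vnd.example.manifest.v1+json" // assumed media type

// manifestImpl is a stand-in Manifest that just carries its raw payload.
type manifestImpl struct{ payload []byte }

func (m *manifestImpl) References() []distribution.Descriptor { return nil }
func (m *manifestImpl) Payload() (string, []byte, error)      { return mediaType, m.payload, nil }

func init() {
	// Route payloads carrying our media type to this package's decoder.
	err := distribution.RegisterManifestSchema(mediaType, func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
		desc := distribution.Descriptor{
			MediaType: mediaType,
			Digest:    digest.FromBytes(b),
			Size:      int64(len(b)),
		}
		return &manifestImpl{payload: b}, desc, nil
	})
	if err != nil {
		panic(err) // duplicate registration is a programming error
	}
}

A consumer then calls distribution.UnmarshalManifest(resp.Header.Get("Content-Type"), payload); mime.ParseMediaType strips any parameters from the header before the lookup.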


@@ -0,0 +1,334 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// name := [hostname '/'] component ['/' component]*
// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
//
// digest := digest-algorithm ":" digest-hex
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
package reference
import (
"errors"
"fmt"
"github.com/docker/distribution/digest"
)
const (
// NameTotalLengthMax is the maximum total number of characters in a repository name.
NameTotalLengthMax = 255
)
var (
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
ErrReferenceInvalidFormat = errors.New("invalid reference format")
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
ErrTagInvalidFormat = errors.New("invalid tag format")
// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
ErrDigestInvalidFormat = errors.New("invalid digest format")
// ErrNameEmpty is returned for empty, invalid repository names.
ErrNameEmpty = errors.New("repository name must have at least one component")
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
)
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
// String returns the full reference
String() string
}
// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
reference Reference
}
// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
return Field{reference}
}
// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
return f.reference
}
// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
return []byte(f.reference.String()), nil
}
// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
r, err := Parse(string(p))
if err != nil {
return err
}
f.reference = r
return nil
}
// Named is an object with a full name
type Named interface {
Reference
Name() string
}
// Tagged is an object which has a tag
type Tagged interface {
Reference
Tag() string
}
// NamedTagged is an object including a name and tag.
type NamedTagged interface {
Named
Tag() string
}
// Digested is an object which has a digest
// by which it can be referenced
type Digested interface {
Reference
Digest() digest.Digest
}
// Canonical reference is an object with a fully unique
// name, including a hostname and digest
type Canonical interface {
Named
Digest() digest.Digest
}
// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
func SplitHostname(named Named) (string, string) {
name := named.Name()
match := anchoredNameRegexp.FindStringSubmatch(name)
if match == nil || len(match) != 3 {
return "", name
}
return match[1], match[2]
}
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
matches := ReferenceRegexp.FindStringSubmatch(s)
if matches == nil {
if s == "" {
return nil, ErrNameEmpty
}
// TODO(dmcgowan): Provide more specific and helpful error
return nil, ErrReferenceInvalidFormat
}
if len(matches[1]) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
ref := reference{
name: matches[1],
tag: matches[2],
}
if matches[3] != "" {
var err error
ref.digest, err = digest.ParseDigest(matches[3])
if err != nil {
return nil, err
}
}
r := getBestReferenceType(ref)
if r == nil {
return nil, ErrNameEmpty
}
return r, nil
}
// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name, otherwise an error is
// returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
ref, err := Parse(s)
if err != nil {
return nil, err
}
named, isNamed := ref.(Named)
if !isNamed {
return nil, fmt.Errorf("reference %s has no name", ref.String())
}
return named, nil
}
// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
if len(name) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
if !anchoredNameRegexp.MatchString(name) {
return nil, ErrReferenceInvalidFormat
}
return repository(name), nil
}
// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
if !anchoredTagRegexp.MatchString(tag) {
return nil, ErrTagInvalidFormat
}
return taggedReference{
name: name.Name(),
tag: tag,
}, nil
}
// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
if !anchoredDigestRegexp.MatchString(digest.String()) {
return nil, ErrDigestInvalidFormat
}
return canonicalReference{
name: name.Name(),
digest: digest,
}, nil
}
func getBestReferenceType(ref reference) Reference {
if ref.name == "" {
// Allow digest only references
if ref.digest != "" {
return digestReference(ref.digest)
}
return nil
}
if ref.tag == "" {
if ref.digest != "" {
return canonicalReference{
name: ref.name,
digest: ref.digest,
}
}
return repository(ref.name)
}
if ref.digest == "" {
return taggedReference{
name: ref.name,
tag: ref.tag,
}
}
return ref
}
type reference struct {
name string
tag string
digest digest.Digest
}
func (r reference) String() string {
return r.name + ":" + r.tag + "@" + r.digest.String()
}
func (r reference) Name() string {
return r.name
}
func (r reference) Tag() string {
return r.tag
}
func (r reference) Digest() digest.Digest {
return r.digest
}
type repository string
func (r repository) String() string {
return string(r)
}
func (r repository) Name() string {
return string(r)
}
type digestReference digest.Digest
func (d digestReference) String() string {
// Convert via the underlying string type; calling d.String() here would
// recurse forever.
return string(d)
}
func (d digestReference) Digest() digest.Digest {
return digest.Digest(d)
}
type taggedReference struct {
name string
tag string
}
func (t taggedReference) String() string {
return t.name + ":" + t.tag
}
func (t taggedReference) Name() string {
return t.name
}
func (t taggedReference) Tag() string {
return t.tag
}
type canonicalReference struct {
name string
digest digest.Digest
}
func (c canonicalReference) String() string {
return c.name + "@" + c.digest.String()
}
func (c canonicalReference) Name() string {
return c.name
}
func (c canonicalReference) Digest() digest.Digest {
return c.digest
}
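
A small, self-contained sketch of the parsing API above; the image name is just an example value.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNamed("registry.example.com:5000/library/ubuntu:14.04")
	if err != nil {
		panic(err)
	}
	hostname, remoteName := reference.SplitHostname(named)
	fmt.Println(hostname, remoteName) // registry.example.com:5000 library/ubuntu
	if tagged, ok := named.(reference.NamedTagged); ok {
		fmt.Println(tagged.Tag()) // 14.04
	}
}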


@@ -0,0 +1,124 @@
package reference
import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allows one period, one or two underscores, and multiple
// dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscores, and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// hostnameComponentRegexp restricts each hostname component of a repository
// name to alphanumeric characters, with dashes allowed only in the interior
// (a single DNS label).
hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// hostnameRegexp defines the structure of potential hostname components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
hostnameRegexp = expression(
hostnameComponentRegexp,
optional(repeated(literal(`.`), hostnameComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = match(`[\w][\w.-]{0,127}`)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = anchored(TagRegexp)
// DigestRegexp matches valid digests.
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = anchored(DigestRegexp)
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the hostname and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(hostnameRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// hostname and trailing components.
anchoredNameRegexp = anchored(
optional(capture(hostnameRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp)))
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
}
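
Only the exported regexps are intended for use outside the package; a short sketch with an example input:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Capturing groups: 1=name, 2=tag, 3=digest (empty when absent).
	m := reference.ReferenceRegexp.FindStringSubmatch("quay.io/coreos/etcd:v2.3.7")
	fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3])
	// name="quay.io/coreos/etcd" tag="v2.3.7" digest=""
}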

vendor/github.com/docker/distribution/registry.go generated vendored Normal file

@@ -0,0 +1,97 @@
package distribution
import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
)
// Scope defines the set of items that match a namespace.
type Scope interface {
// Contains returns true if the name belongs to the namespace.
Contains(name string) bool
}
type fullScope struct{}
func (f fullScope) Contains(string) bool {
return true
}
// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})
// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
// Scope describes the names that can be used with this Namespace. The
// global namespace will have a scope that matches all names. The scope
// effectively provides an identity for the namespace.
Scope() Scope
// Repository should return a reference to the named repository. The
// registry may or may not have the repository but should always return a
// reference.
Repository(ctx context.Context, name reference.Named) (Repository, error)
// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries
// which were filled. 'last' contains an offset in the catalog, and 'err' will be
// set to io.EOF if there are no more entries to obtain.
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
// Blobs returns a blob enumerator to access all blobs
Blobs() BlobEnumerator
// BlobStatter returns a BlobStatter to control
BlobStatter() BlobStatter
}
// RepositoryEnumerator describes an operation to enumerate repositories
type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}
// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
Apply(ManifestService) error
}
// WithTag allows a tag to be passed into Put
func WithTag(tag string) ManifestServiceOption {
return WithTagOption{tag}
}
// WithTagOption holds a tag
type WithTagOption struct{ Tag string }
// Apply conforms to the ManifestServiceOption interface
func (o WithTagOption) Apply(m ManifestService) error {
// no implementation
return nil
}
// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
Named() reference.Named
// Manifests returns a reference to this repository's manifest service.
// with the supplied options applied.
Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
// Blobs returns a reference to this repository's blob service.
Blobs(ctx context.Context) BlobStore
// TODO(stevvooe): The above BlobStore return can probably be relaxed to
// be a BlobService for use with clients. This will allow such
// implementations to avoid implementing ServeBlob.
// Tags returns a reference to this repository's tag service
Tags(ctx context.Context) TagService
}
// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.
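
A sketch of how a ManifestServiceOption such as WithTag threads through a Put call; the repository is assumed to come from a Namespace, and the tag is an example value.

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// putTagged stores a manifest and asks the manifest service to tag it as well.
func putTagged(ctx context.Context, repo distribution.Repository, m distribution.Manifest) (digest.Digest, error) {
	ms, err := repo.Manifests(ctx)
	if err != nil {
		return "", err
	}
	// The option is applied by the ManifestService implementation.
	return ms.Put(ctx, m, distribution.WithTag("latest"))
}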


@@ -0,0 +1,267 @@
package errcode
import (
"encoding/json"
"fmt"
"strings"
)
// ErrorCoder is the base interface for ErrorCode and Error allowing
// users of each to just call ErrorCode to get the real ID of each
type ErrorCoder interface {
ErrorCode() ErrorCode
}
// ErrorCode represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type ErrorCode int
var _ error = ErrorCode(0)
// ErrorCode just returns itself
func (ec ErrorCode) ErrorCode() ErrorCode {
return ec
}
// Error returns the ID/Value
func (ec ErrorCode) Error() string {
// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
}
// Descriptor returns the descriptor for the error code.
func (ec ErrorCode) Descriptor() ErrorDescriptor {
d, ok := errorCodeToDescriptors[ec]
if !ok {
return ErrorCodeUnknown.Descriptor()
}
return d
}
// String returns the canonical identifier for this error code.
func (ec ErrorCode) String() string {
return ec.Descriptor().Value
}
// Message returns the human-readable error message for this error code.
func (ec ErrorCode) Message() string {
return ec.Descriptor().Message
}
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result.
func (ec ErrorCode) MarshalText() (text []byte, err error) {
return []byte(ec.String()), nil
}
// UnmarshalText decodes the form generated by MarshalText.
func (ec *ErrorCode) UnmarshalText(text []byte) error {
desc, ok := idToDescriptors[string(text)]
if !ok {
desc = ErrorCodeUnknown.Descriptor()
}
*ec = desc.Code
return nil
}
// WithMessage creates a new Error struct based on the passed-in info and
// overrides the Message property.
func (ec ErrorCode) WithMessage(message string) Error {
return Error{
Code: ec,
Message: message,
}
}
// WithDetail creates a new Error struct based on the passed-in info and
// set the Detail property appropriately
func (ec ErrorCode) WithDetail(detail interface{}) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithDetail(detail)
}
// WithArgs creates a new Error struct and sets the Args slice
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithArgs(args...)
}
// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
Code ErrorCode `json:"code"`
Message string `json:"message"`
Detail interface{} `json:"detail,omitempty"`
// TODO(duglin): See if we need an "args" property so we can do the
// variable substitution right before showing the message to the user
}
var _ error = Error{}
// ErrorCode returns the ID/Value of this Error
func (e Error) ErrorCode() ErrorCode {
return e.Code
}
// Error returns a human readable representation of the error.
func (e Error) Error() string {
return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}
// WithDetail will return a new Error, based on the current one, but with
// some Detail info added
func (e Error) WithDetail(detail interface{}) Error {
return Error{
Code: e.Code,
Message: e.Message,
Detail: detail,
}
}
// WithArgs uses the passed-in list of interface{} as the substitution
// variables in the Error's Message string, but returns a new Error
func (e Error) WithArgs(args ...interface{}) Error {
return Error{
Code: e.Code,
Message: fmt.Sprintf(e.Code.Message(), args...),
Detail: e.Detail,
}
}
// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
// Code is the error code that this descriptor describes.
Code ErrorCode
// Value provides a unique string key, often capitalized with
// underscores, to identify the error code. This value is used as the
// keyed value when serializing api errors.
Value string
// Message is a short, human readable description of the error condition
// included in API responses.
Message string
// Description provides a complete account of the error's purpose, suitable
// for use in documentation.
Description string
// HTTPStatusCode provides the http status code that is associated with
// this error condition.
HTTPStatusCode int
}
// ParseErrorCode returns the value by the string error code.
// `ErrorCodeUnknown` will be returned if the error is not known.
func ParseErrorCode(value string) ErrorCode {
ed, ok := idToDescriptors[value]
if ok {
return ed.Code
}
return ErrorCodeUnknown
}
// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error
var _ error = Errors{}
func (errs Errors) Error() string {
switch len(errs) {
case 0:
return "<nil>"
case 1:
return errs[0].Error()
default:
msg := "errors:\n"
for _, err := range errs {
msg += err.Error() + "\n"
}
return msg
}
}
// Len returns the current number of errors.
func (errs Errors) Len() int {
return len(errs)
}
// MarshalJSON converts slice of error, ErrorCode or Error into a
// slice of Error - then serializes
func (errs Errors) MarshalJSON() ([]byte, error) {
var tmpErrs struct {
Errors []Error `json:"errors,omitempty"`
}
for _, daErr := range errs {
var err Error
switch daErr.(type) {
case ErrorCode:
err = daErr.(ErrorCode).WithDetail(nil)
case Error:
err = daErr.(Error)
default:
err = ErrorCodeUnknown.WithDetail(daErr)
}
// If the Error struct was set up and the Message field was left empty
// (meaning it's "") then grab it from the ErrCode
msg := err.Message
if msg == "" {
msg = err.Code.Message()
}
tmpErrs.Errors = append(tmpErrs.Errors, Error{
Code: err.Code,
Message: msg,
Detail: err.Detail,
})
}
return json.Marshal(tmpErrs)
}
// UnmarshalJSON deserializes []Error and then converts it into slice of
// Error or ErrorCode
func (errs *Errors) UnmarshalJSON(data []byte) error {
var tmpErrs struct {
Errors []Error
}
if err := json.Unmarshal(data, &tmpErrs); err != nil {
return err
}
var newErrs Errors
for _, daErr := range tmpErrs.Errors {
// If Message is empty or exactly matches the Code's message string
// then just use the Code, no need for a full Error struct
if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
// Error's w/o details get converted to ErrorCode
newErrs = append(newErrs, daErr.Code)
} else {
// Error's w/ details are untouched
newErrs = append(newErrs, Error{
Code: daErr.Code,
Message: daErr.Message,
Detail: daErr.Detail,
})
}
}
*errs = newErrs
return nil
}
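
A runnable sketch of the envelope behavior; the "example" group and its descriptor are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

var errExample = errcode.Register("example", errcode.ErrorDescriptor{
	Value:          "EXAMPLE_FAILED",
	Message:        "example failed for %s",
	Description:    "Returned by this sketch to show message substitution.",
	HTTPStatusCode: http.StatusBadRequest,
})

func main() {
	errs := errcode.Errors{
		errExample.WithArgs("demo"),                     // substitutes into Message
		errcode.ErrorCodeUnknown.WithDetail("raw info"), // carries a detail payload
	}
	out, err := json.Marshal(errs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"errors":[{"code":"EXAMPLE_FAILED","message":"example failed for demo"},
	//            {"code":"UNKNOWN","message":"unknown error","detail":"raw info"}]}
}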


@@ -0,0 +1,44 @@
package errcode
import (
"encoding/json"
"net/http"
)
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
// and sets the content-type header to 'application/json'. It will handle
// ErrorCoder and Errors, and if necessary will create an envelope.
func ServeJSON(w http.ResponseWriter, err error) error {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
var sc int
switch errs := err.(type) {
case Errors:
if len(errs) < 1 {
break
}
if err, ok := errs[0].(ErrorCoder); ok {
sc = err.ErrorCode().Descriptor().HTTPStatusCode
}
case ErrorCoder:
sc = errs.ErrorCode().Descriptor().HTTPStatusCode
err = Errors{err} // create an envelope.
default:
// We just have an unhandled error type, so just place in an envelope
// and move along.
err = Errors{err}
}
if sc == 0 {
sc = http.StatusInternalServerError
}
w.WriteHeader(sc)
if err := json.NewEncoder(w).Encode(err); err != nil {
return err
}
return nil
}
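
A sketch of ServeJSON inside an HTTP handler; the handler and its failure are contrived for illustration.

package main

import (
	"log"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Pretend the operation is unsupported; ServeJSON picks the status code
	// from the error's descriptor and writes the JSON envelope.
	err := errcode.ErrorCodeUnsupported.WithDetail(r.URL.Path)
	if writeErr := errcode.ServeJSON(w, err); writeErr != nil {
		log.Printf("failed to write error envelope: %v", writeErr)
	}
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}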


@@ -0,0 +1,138 @@
package errcode
import (
"fmt"
"net/http"
"sort"
"sync"
)
var (
errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
idToDescriptors = map[string]ErrorDescriptor{}
groupToDescriptors = map[string][]ErrorDescriptor{}
)
var (
// ErrorCodeUnknown is a generic error that can be used as a last
// resort if there is no situation-specific error message that can be used
ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
Value: "UNKNOWN",
Message: "unknown error",
Description: `Generic error returned when the error does not have an
API classification.`,
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeUnsupported is returned when an operation is not supported.
ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
Value: "UNSUPPORTED",
Message: "The operation is unsupported.",
Description: `The operation was unsupported due to a missing
implementation or invalid set of parameters.`,
HTTPStatusCode: http.StatusMethodNotAllowed,
})
// ErrorCodeUnauthorized is returned if a request requires
// authentication.
ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
Value: "UNAUTHORIZED",
Message: "authentication required",
Description: `The access controller was unable to authenticate
the client. Often this will be accompanied by a
Www-Authenticate HTTP response header indicating how to
authenticate.`,
HTTPStatusCode: http.StatusUnauthorized,
})
// ErrorCodeDenied is returned if a client does not have sufficient
// permission to perform an action.
ErrorCodeDenied = Register("errcode", ErrorDescriptor{
Value: "DENIED",
Message: "requested access to the resource is denied",
Description: `The access controller denied access for the
operation on a resource.`,
HTTPStatusCode: http.StatusForbidden,
})
// ErrorCodeUnavailable provides a common error to report unavailability
// of a service or endpoint.
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
Value: "UNAVAILABLE",
Message: "service unavailable",
Description: "Returned when a service is not available",
HTTPStatusCode: http.StatusServiceUnavailable,
})
// ErrorCodeTooManyRequests is returned if a client attempts too many
// times to contact a service endpoint.
ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
Value: "TOOMANYREQUESTS",
Message: "too many requests",
Description: `Returned when a client attempts to contact a
service too many times`,
HTTPStatusCode: http.StatusTooManyRequests,
})
)
var nextCode = 1000
var registerLock sync.Mutex
// Register will make the passed-in error known to the environment and
// return a new ErrorCode
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
registerLock.Lock()
defer registerLock.Unlock()
descriptor.Code = ErrorCode(nextCode)
if _, ok := idToDescriptors[descriptor.Value]; ok {
panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
}
if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
}
groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
errorCodeToDescriptors[descriptor.Code] = descriptor
idToDescriptors[descriptor.Value] = descriptor
nextCode++
return descriptor.Code
}
type byValue []ErrorDescriptor
func (a byValue) Len() int { return len(a) }
func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
// GetGroupNames returns the list of Error group names that are registered
func GetGroupNames() []string {
keys := []string{}
for k := range groupToDescriptors {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// GetErrorCodeGroup returns the named group of error descriptors
func GetErrorCodeGroup(name string) []ErrorDescriptor {
desc := groupToDescriptors[name]
sort.Sort(byValue(desc))
return desc
}
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
// registered, irrespective of what group they're in
func GetErrorAllDescriptors() []ErrorDescriptor {
result := []ErrorDescriptor{}
for _, group := range GetGroupNames() {
result = append(result, GetErrorCodeGroup(group)...)
}
sort.Sort(byValue(result))
return result
}
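
The descriptor registry can be walked, e.g. to generate documentation; a short sketch:

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	for _, group := range errcode.GetGroupNames() {
		for _, desc := range errcode.GetErrorCodeGroup(group) {
			fmt.Printf("%s: %s -> HTTP %d\n", group, desc.Value, desc.HTTPStatusCode)
		}
	}
}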

File diff suppressed because it is too large


@@ -0,0 +1,9 @@
// Package v2 describes routes, urls and the error codes used in the Docker
// Registry JSON HTTP API V2. In addition to declarations, descriptors are
// provided for routes and error codes that can be used for implementation and
// automatically generating documentation.
//
// Definitions here are considered to be locked down for the V2 registry api.
// Any changes must be considered carefully and should not proceed without a
// change proposal in docker core.
package v2


@@ -0,0 +1,136 @@
package v2
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
const errGroup = "registry.api.v2"
var (
// ErrorCodeDigestInvalid is returned when uploading a blob if the
// provided digest does not match the blob contents.
ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "DIGEST_INVALID",
Message: "provided digest did not match uploaded content",
Description: `When a blob is uploaded, the registry will check that
the content matches the digest provided by the client. The error may
include a detail structure with the key "digest", including the
invalid digest string. This error may also be returned when a manifest
includes an invalid layer digest.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
// length does not match the content length.
ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "SIZE_INVALID",
Message: "provided length did not match content length",
Description: `When a layer is uploaded, the provided size will be
checked against the uploaded content. If they do not match, this error
will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeNameInvalid is returned when the name in the manifest does not
// match the provided name.
ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NAME_INVALID",
Message: "invalid repository name",
Description: `Invalid repository name encountered either during
manifest validation or any API operation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeTagInvalid is returned when the tag in the manifest does not
// match the provided tag.
ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "TAG_INVALID",
Message: "manifest tag did not match URI",
Description: `During a manifest upload, if the tag in the manifest
does not match the uri tag, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeNameUnknown is returned when the repository name is not known.
ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NAME_UNKNOWN",
Message: "repository name not known to registry",
Description: `This is returned if the name used during an operation is
unknown to the registry.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_UNKNOWN",
Message: "manifest unknown",
Description: `This error is returned when the manifest, identified by
name and tag, is unknown to the repository.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
// typically during a PUT operation. This error encompasses all errors
// encountered during manifest validation that aren't signature errors.
ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_INVALID",
Message: "manifest invalid",
Description: `During upload, manifests undergo several checks ensuring
validity. If those checks fail, this error may be returned, unless a
more specific error is included. The detail will contain information about
the failed validation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestUnverified is returned when the manifest fails
// signature verification.
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_UNVERIFIED",
Message: "manifest failed signature verification",
Description: `During manifest upload, if the manifest fails signature
verification, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
// unknown to the registry.
ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a manifest blob is
unknown to the registry.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeBlobUnknown is returned when a blob is unknown to the
// registry. This can happen when the manifest references a nonexistent
// layer or the result is not found by a blob fetch.
ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a blob is unknown to the
registry in a specified repository. This can be returned with a
standard get or if a manifest references an unknown layer during
upload.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UPLOAD_UNKNOWN",
Message: "blob upload unknown to registry",
Description: `If a blob upload has been cancelled or was never
started, this error code may be returned.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UPLOAD_INVALID",
Message: "blob upload invalid",
Description: `The blob upload encountered an error and can no
longer proceed.`,
HTTPStatusCode: http.StatusNotFound,
})
)


@@ -0,0 +1,49 @@
package v2
import "github.com/gorilla/mux"
// The following are definitions of the name under which all V2 routes are
// registered. These symbols can be used to look up a route based on the name.
const (
RouteNameBase = "base"
RouteNameManifest = "manifest"
RouteNameTags = "tags"
RouteNameBlob = "blob"
RouteNameBlobUpload = "blob-upload"
RouteNameBlobUploadChunk = "blob-upload-chunk"
RouteNameCatalog = "catalog"
)
var allEndpoints = []string{
RouteNameManifest,
RouteNameCatalog,
RouteNameTags,
RouteNameBlob,
RouteNameBlobUpload,
RouteNameBlobUploadChunk,
}
// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
func Router() *mux.Router {
return RouterWithPrefix("")
}
// RouterWithPrefix builds a gorilla router with a configured prefix
// on all routes.
func RouterWithPrefix(prefix string) *mux.Router {
rootRouter := mux.NewRouter()
router := rootRouter
if prefix != "" {
router = router.PathPrefix(prefix).Subrouter()
}
router.StrictSlash(true)
for _, descriptor := range routeDescriptors {
router.Path(descriptor.Path).Name(descriptor.Name)
}
return rootRouter
}
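
A sketch of looking up a named route and generating a URL from it; the prefix and repository name are example values.

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	router := v2.RouterWithPrefix("/mirror")
	route := router.GetRoute(v2.RouteNameManifest)
	u, err := route.URL("name", "library/ubuntu", "reference", "latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path) // /mirror/v2/library/ubuntu/manifests/latest
}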


@@ -0,0 +1,251 @@
package v2
import (
"net/http"
"net/url"
"strings"
"github.com/docker/distribution/reference"
"github.com/gorilla/mux"
)
// URLBuilder creates registry API urls from a single base endpoint. It can be
// used to create urls for use in a registry client or server.
//
// All urls will be created from the given base, including the api version.
// For example, if a root of "/foo/" is provided, urls generated will fall
// under "/foo/v2/...". Most applications will only provide a schema, host and
// port, such as "https://localhost:5000/".
type URLBuilder struct {
root *url.URL // url root (ie http://localhost/)
router *mux.Router
relative bool
}
// NewURLBuilder creates a URLBuilder with provided root url object.
func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
return &URLBuilder{
root: root,
router: Router(),
relative: relative,
}
}
// NewURLBuilderFromString works identically to NewURLBuilder except it takes
// a string argument for the root, returning an error if it is not a valid
// url.
func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
u, err := url.Parse(root)
if err != nil {
return nil, err
}
return NewURLBuilder(u, relative), nil
}
// NewURLBuilderFromRequest uses information from an *http.Request to
// construct the root url.
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
var scheme string
forwardedProto := r.Header.Get("X-Forwarded-Proto")
switch {
case len(forwardedProto) > 0:
scheme = forwardedProto
case r.TLS != nil:
scheme = "https"
case len(r.URL.Scheme) > 0:
scheme = r.URL.Scheme
default:
scheme = "http"
}
host := r.Host
forwardedHost := r.Header.Get("X-Forwarded-Host")
if len(forwardedHost) > 0 {
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
// comma-separated list of hosts, to which each proxy appends the
// requested host. We want to grab the first from this comma-separated
// list.
hosts := strings.SplitN(forwardedHost, ",", 2)
host = strings.TrimSpace(hosts[0])
}
basePath := routeDescriptorsMap[RouteNameBase].Path
requestPath := r.URL.Path
index := strings.Index(requestPath, basePath)
u := &url.URL{
Scheme: scheme,
Host: host,
}
if index > 0 {
// N.B. index+1 is important because we want to include the trailing /
u.Path = requestPath[0 : index+1]
}
return NewURLBuilder(u, relative)
}
// BuildBaseURL constructs a base url for the API, typically just "/v2/".
func (ub *URLBuilder) BuildBaseURL() (string, error) {
route := ub.cloneRoute(RouteNameBase)
baseURL, err := route.URL()
if err != nil {
return "", err
}
return baseURL.String(), nil
}
// BuildCatalogURL constructs a url to get a catalog of repositories
func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameCatalog)
catalogURL, err := route.URL()
if err != nil {
return "", err
}
return appendValuesURL(catalogURL, values...).String(), nil
}
// BuildTagsURL constructs a url to list the tags in the named repository.
func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameTags)
tagsURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return tagsURL.String(), nil
}
// BuildManifestURL constructs a url for the manifest identified by name and
// reference. The argument reference may be either a tag or digest.
func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameManifest)
tagOrDigest := ""
switch v := ref.(type) {
case reference.Tagged:
tagOrDigest = v.Tag()
case reference.Digested:
tagOrDigest = v.Digest().String()
}
manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
if err != nil {
return "", err
}
return manifestURL.String(), nil
}
// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
route := ub.cloneRoute(RouteNameBlob)
layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
if err != nil {
return "", err
}
return layerURL.String(), nil
}
// BuildBlobUploadURL constructs a url to begin a blob upload in the
// repository identified by name.
func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUpload)
uploadURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
// including any url values. This should generally not be used by clients, as
// this url is provided by server implementations during the blob upload
// process.
func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUploadChunk)
uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// cloneRoute returns a clone of the named route from the router. Routes
// must be cloned to avoid modifying them during url generation.
func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
route := new(mux.Route)
root := new(url.URL)
*route = *ub.router.GetRoute(name) // clone the route
*root = *ub.root
return clonedRoute{Route: route, root: root, relative: ub.relative}
}
type clonedRoute struct {
*mux.Route
root *url.URL
relative bool
}
func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
routeURL, err := cr.Route.URL(pairs...)
if err != nil {
return nil, err
}
if cr.relative {
return routeURL, nil
}
if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
routeURL.Path = routeURL.Path[1:]
}
url := cr.root.ResolveReference(routeURL)
url.Scheme = cr.root.Scheme
return url, nil
}
// appendValuesURL appends the parameters to the url.
func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
merged := u.Query()
for _, v := range values {
for k, vv := range v {
merged[k] = append(merged[k], vv...)
}
}
u.RawQuery = merged.Encode()
return u
}
// appendValues appends the parameters to the url. Panics if the string is not
// a url.
func appendValues(u string, values ...url.Values) string {
up, err := url.Parse(u)
if err != nil {
panic(err) // should never happen
}
return appendValuesURL(up, values...).String()
}
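
A sketch tying the builder to the reference package; the host and image are example values.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com:5000", false)
	if err != nil {
		panic(err)
	}
	named, _ := reference.ParseNamed("library/ubuntu")
	tagged, _ := reference.WithTag(named, "14.04")
	manifestURL, err := ub.BuildManifestURL(tagged)
	if err != nil {
		panic(err)
	}
	fmt.Println(manifestURL) // https://registry.example.com:5000/v2/library/ubuntu/manifests/14.04
}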


@@ -0,0 +1,168 @@
// Package auth defines a standard interface for request access controllers.
//
// An access controller has a simple interface with a single `Authorized`
// method which checks that a given request is authorized to perform one or
// more actions on one or more resources. This method should return a non-nil
// error if the request is not authorized.
//
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
//
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
// accessController, _ := auth.GetAccessController("silly", options)
//
// This `accessController` can then be used in a request handler like so:
//
// func updateOrder(w http.ResponseWriter, r *http.Request) {
// orderNumber := r.FormValue("orderNumber")
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// access := auth.Access{Resource: resource, Action: "update"}
//
// if ctx, err := accessController.Authorized(ctx, access); err != nil {
// if challenge, ok := err.(auth.Challenge); ok {
// // Let the challenge write the response.
// challenge.SetHeaders(w)
// w.WriteHeader(http.StatusUnauthorized)
// return
// } else {
// // Some other error.
// }
// }
// }
//
package auth
import (
"errors"
"fmt"
"net/http"
"github.com/docker/distribution/context"
)
const (
// UserKey is used to get the user object from
// a user context
UserKey = "auth.user"
// UserNameKey is used to get the user name from
// a user context
UserNameKey = "auth.user.name"
)
var (
// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
ErrInvalidCredential = errors.New("invalid authorization credential")
// ErrAuthenticationFailure returned when authentication fails.
ErrAuthenticationFailure = errors.New("authentication failure")
)
// UserInfo carries information about
// an authenticated/authorized client.
type UserInfo struct {
Name string
}
// Resource describes a resource by type and name.
type Resource struct {
Type string
Name string
}
// Access describes a specific action that is
// requested or allowed for a given resource.
type Access struct {
Resource
Action string
}
// Challenge is a special error type which is used for HTTP 401 Unauthorized
// responses and is able to write the response with WWW-Authenticate challenge
// header values based on the error.
type Challenge interface {
error
// SetHeaders prepares the response to conduct a challenge by adding an
// HTTP challenge header to the response message. Callers
// are expected to set the appropriate HTTP status code (e.g. 401)
// themselves.
SetHeaders(w http.ResponseWriter)
}
// AccessController controls access to registry resources based on a request
// and required access levels for a request. Implementations can support both
// complete denial and http authorization challenges.
type AccessController interface {
// Authorized returns a new authorized context and a nil error if the
// context is granted access. If one or more Access structs are
// provided, the requested access will be compared with what is available
// to the context. The given context will contain a "http.request" key with
// a `*http.Request` value. If the error is non-nil, access should always
// be denied. The error may be of type Challenge, in which case the caller
// may have the Challenge handle the request or choose what action to take
// based on the Challenge header or response status. The returned context
// object should have an "auth.user" value set to a UserInfo struct.
Authorized(ctx context.Context, access ...Access) (context.Context, error)
}
// CredentialAuthenticator is an object which is able to authenticate credentials
type CredentialAuthenticator interface {
AuthenticateUser(username, password string) error
}
// WithUser returns a context with the authorized user info.
func WithUser(ctx context.Context, user UserInfo) context.Context {
return userInfoContext{
Context: ctx,
user: user,
}
}
type userInfoContext struct {
context.Context
user UserInfo
}
func (uic userInfoContext) Value(key interface{}) interface{} {
switch key {
case UserKey:
return uic.user
case UserNameKey:
return uic.user.Name
}
return uic.Context.Value(key)
}
// InitFunc is the type of an AccessController factory function and is used
// to register the constructor for different AccessController backends.
type InitFunc func(options map[string]interface{}) (AccessController, error)
var accessControllers map[string]InitFunc
func init() {
accessControllers = make(map[string]InitFunc)
}
// Register is used to register an InitFunc for
// an AccessController backend with the given name.
func Register(name string, initFunc InitFunc) error {
if _, exists := accessControllers[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
accessControllers[name] = initFunc
return nil
}
// GetAccessController constructs an AccessController
// with the given options using the named backend.
func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
if initFunc, exists := accessControllers[name]; exists {
return initFunc(options)
}
return nil, fmt.Errorf("no access controller registered with name: %s", name)
}
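
A sketch of the registration side, mirroring the "silly" controller from the package comment; the backend below is hypothetical and grants every request.

package silly // hypothetical access controller backend

import (
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/auth"
)

type accessController struct {
	secret string
}

// Authorized grants every request; a real controller would check credentials
// against the requested access set and return an auth.Challenge on failure.
func (ac *accessController) Authorized(ctx context.Context, access ...auth.Access) (context.Context, error) {
	return auth.WithUser(ctx, auth.UserInfo{Name: "anonymous"}), nil
}

func init() {
	initFunc := func(options map[string]interface{}) (auth.AccessController, error) {
		secret, _ := options["sillySecret"].(string)
		return &accessController{secret: secret}, nil
	}
	if err := auth.Register("silly", auth.InitFunc(initFunc)); err != nil {
		panic(err)
	}
}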


@@ -0,0 +1,268 @@
package token
import (
"crypto"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/auth"
"github.com/docker/libtrust"
)
// accessSet maps a typed, named resource to
// a set of actions requested or authorized.
type accessSet map[auth.Resource]actionSet
// newAccessSet constructs an accessSet from
// a variable number of auth.Access items.
func newAccessSet(accessItems ...auth.Access) accessSet {
accessSet := make(accessSet, len(accessItems))
for _, access := range accessItems {
resource := auth.Resource{
Type: access.Type,
Name: access.Name,
}
set, exists := accessSet[resource]
if !exists {
set = newActionSet()
accessSet[resource] = set
}
set.add(access.Action)
}
return accessSet
}
// contains returns whether or not the given access is in this accessSet.
func (s accessSet) contains(access auth.Access) bool {
actionSet, ok := s[access.Resource]
if ok {
return actionSet.contains(access.Action)
}
return false
}
// scopeParam returns a collection of scopes which can
// be used for a WWW-Authenticate challenge parameter.
// See https://tools.ietf.org/html/rfc6750#section-3
func (s accessSet) scopeParam() string {
scopes := make([]string, 0, len(s))
for resource, actionSet := range s {
actions := strings.Join(actionSet.keys(), ",")
scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions))
}
return strings.Join(scopes, " ")
}
// Errors used and exported by this package.
var (
ErrInsufficientScope = errors.New("insufficient scope")
ErrTokenRequired = errors.New("authorization token required")
)
// authChallenge implements the auth.Challenge interface.
type authChallenge struct {
err error
realm string
service string
accessSet accessSet
}
var _ auth.Challenge = authChallenge{}
// Error returns the internal error string for this authChallenge.
func (ac authChallenge) Error() string {
return ac.err.Error()
}
// Status returns the HTTP Response Status Code for this authChallenge.
func (ac authChallenge) Status() int {
return http.StatusUnauthorized
}
// challengeParams constructs the value to be used in
// the WWW-Authenticate response challenge header.
// See https://tools.ietf.org/html/rfc6750#section-3
func (ac authChallenge) challengeParams() string {
str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service)
if scope := ac.accessSet.scopeParam(); scope != "" {
str = fmt.Sprintf("%s,scope=%q", str, scope)
}
if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken {
str = fmt.Sprintf("%s,error=%q", str, "invalid_token")
} else if ac.err == ErrInsufficientScope {
str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope")
}
return str
}
// SetHeaders sets the WWW-Authenticate value for the response.
func (ac authChallenge) SetHeaders(w http.ResponseWriter) {
w.Header().Add("WWW-Authenticate", ac.challengeParams())
}
// accessController implements the auth.AccessController interface.
type accessController struct {
realm string
issuer string
service string
rootCerts *x509.CertPool
trustedKeys map[string]libtrust.PublicKey
}
// tokenAccessOptions is a convenience type for handling
// options to the constructor of an accessController.
type tokenAccessOptions struct {
realm string
issuer string
service string
rootCertBundle string
}
// checkOptions gathers the necessary options
// for an accessController from the given map.
func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
var opts tokenAccessOptions
keys := []string{"realm", "issuer", "service", "rootcertbundle"}
vals := make([]string, 0, len(keys))
for _, key := range keys {
val, ok := options[key].(string)
if !ok {
return opts, fmt.Errorf("token auth requires a valid option string: %q", key)
}
vals = append(vals, val)
}
opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3]
return opts, nil
}
// newAccessController creates an accessController using the given options.
func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
config, err := checkOptions(options)
if err != nil {
return nil, err
}
fp, err := os.Open(config.rootCertBundle)
if err != nil {
return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
}
defer fp.Close()
rawCertBundle, err := ioutil.ReadAll(fp)
if err != nil {
return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
}
var rootCerts []*x509.Certificate
pemBlock, rawCertBundle := pem.Decode(rawCertBundle)
for pemBlock != nil {
cert, err := x509.ParseCertificate(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err)
}
rootCerts = append(rootCerts, cert)
pemBlock, rawCertBundle = pem.Decode(rawCertBundle)
}
if len(rootCerts) == 0 {
return nil, errors.New("token auth requires at least one token signing root certificate")
}
rootPool := x509.NewCertPool()
trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts))
for _, rootCert := range rootCerts {
rootPool.AddCert(rootCert)
pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey))
if err != nil {
return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err)
}
trustedKeys[pubKey.KeyID()] = pubKey
}
return &accessController{
realm: config.realm,
issuer: config.issuer,
service: config.service,
rootCerts: rootPool,
trustedKeys: trustedKeys,
}, nil
}
// Authorized handles checking whether the given request is authorized
// for actions on resources described by the given access items.
func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) {
challenge := &authChallenge{
realm: ac.realm,
service: ac.service,
accessSet: newAccessSet(accessItems...),
}
req, err := context.GetRequest(ctx)
if err != nil {
return nil, err
}
parts := strings.Split(req.Header.Get("Authorization"), " ")
if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
challenge.err = ErrTokenRequired
return nil, challenge
}
rawToken := parts[1]
token, err := NewToken(rawToken)
if err != nil {
challenge.err = err
return nil, challenge
}
verifyOpts := VerifyOptions{
TrustedIssuers: []string{ac.issuer},
AcceptedAudiences: []string{ac.service},
Roots: ac.rootCerts,
TrustedKeys: ac.trustedKeys,
}
if err = token.Verify(verifyOpts); err != nil {
challenge.err = err
return nil, challenge
}
accessSet := token.accessSet()
for _, access := range accessItems {
if !accessSet.contains(access) {
challenge.err = ErrInsufficientScope
return nil, challenge
}
}
return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil
}
// init handles registering the token auth backend.
func init() {
auth.Register("token", auth.InitFunc(newAccessController))
}
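
A sketch of wiring this backend up through the options map that checkOptions expects; all values are example configuration, and the bundle path must point at a PEM file of token-signing root certificates.

package main

import (
	"log"

	"github.com/docker/distribution/registry/auth"
	_ "github.com/docker/distribution/registry/auth/token" // registers "token"
)

func main() {
	options := map[string]interface{}{
		"realm":          "https://auth.example.com/token", // example values
		"issuer":         "auth.example.com",
		"service":        "registry.example.com",
		"rootcertbundle": "/etc/registry/root.crt",
	}
	ac, err := auth.GetAccessController("token", options)
	if err != nil {
		log.Fatal(err) // missing option, unreadable bundle, or no certificates
	}
	_ = ac // call ac.Authorized(ctx, access...) per request
}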


@@ -0,0 +1,35 @@
package token
// stringSet is a useful type for looking up strings.
type stringSet map[string]struct{}
// newStringSet creates a new stringSet with the given strings.
func newStringSet(keys ...string) stringSet {
ss := make(stringSet, len(keys))
ss.add(keys...)
return ss
}
// add inserts the given keys into this stringSet.
func (ss stringSet) add(keys ...string) {
for _, key := range keys {
ss[key] = struct{}{}
}
}
// contains returns whether the given key is in this stringSet.
func (ss stringSet) contains(key string) bool {
_, ok := ss[key]
return ok
}
// keys returns a slice of all keys in this stringSet.
func (ss stringSet) keys() []string {
keys := make([]string, 0, len(ss))
for key := range ss {
keys = append(keys, key)
}
return keys
}


@@ -0,0 +1,343 @@
package token
import (
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/libtrust"
"github.com/docker/distribution/registry/auth"
)
const (
// TokenSeparator is the value which separates the header, claims, and
// signature in the compact serialization of a JSON Web Token.
TokenSeparator = "."
)
// Errors used by token parsing and verification.
var (
ErrMalformedToken = errors.New("malformed token")
ErrInvalidToken = errors.New("invalid token")
)
// ResourceActions stores allowed actions on a named and typed resource.
type ResourceActions struct {
Type string `json:"type"`
Name string `json:"name"`
Actions []string `json:"actions"`
}
// ClaimSet describes the main section of a JSON Web Token.
type ClaimSet struct {
// Public claims
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience string `json:"aud"`
Expiration int64 `json:"exp"`
NotBefore int64 `json:"nbf"`
IssuedAt int64 `json:"iat"`
JWTID string `json:"jti"`
// Private claims
Access []*ResourceActions `json:"access"`
}
// Header describes the header section of a JSON Web Token.
type Header struct {
Type string `json:"typ"`
SigningAlg string `json:"alg"`
KeyID string `json:"kid,omitempty"`
X5c []string `json:"x5c,omitempty"`
RawJWK *json.RawMessage `json:"jwk,omitempty"`
}
// Token describes a JSON Web Token.
type Token struct {
Raw string
Header *Header
Claims *ClaimSet
Signature []byte
}
// VerifyOptions is used to specify options when verifying a JSON Web Token.
type VerifyOptions struct {
TrustedIssuers []string
AcceptedAudiences []string
Roots *x509.CertPool
TrustedKeys map[string]libtrust.PublicKey
}
// NewToken parses the given raw token string
// and constructs an unverified JSON Web Token.
func NewToken(rawToken string) (*Token, error) {
parts := strings.Split(rawToken, TokenSeparator)
if len(parts) != 3 {
return nil, ErrMalformedToken
}
var (
rawHeader, rawClaims = parts[0], parts[1]
headerJSON, claimsJSON []byte
err error
)
defer func() {
if err != nil {
log.Errorf("error while unmarshalling raw token: %s", err)
}
}()
if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil {
err = fmt.Errorf("unable to decode header: %s", err)
return nil, ErrMalformedToken
}
if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil {
err = fmt.Errorf("unable to decode claims: %s", err)
return nil, ErrMalformedToken
}
token := new(Token)
token.Header = new(Header)
token.Claims = new(ClaimSet)
token.Raw = strings.Join(parts[:2], TokenSeparator)
if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil {
err = fmt.Errorf("unable to decode signature: %s", err)
return nil, ErrMalformedToken
}
if err = json.Unmarshal(headerJSON, token.Header); err != nil {
return nil, ErrMalformedToken
}
if err = json.Unmarshal(claimsJSON, token.Claims); err != nil {
return nil, ErrMalformedToken
}
return token, nil
}
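// Illustrative sketch: parsing a compact JWT with NewToken. The token
// string is a placeholder; a real value is three base64url-encoded
// sections (header, claims, signature) joined by TokenSeparator.
//
//	token, err := NewToken("<header>.<claims>.<signature>")
//	if err != nil {
//		// err is ErrMalformedToken; the specific cause was logged,
//		// not returned, by the deferred function above.
//	}
//	_ = token // parsed but still unverified at this point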
// Verify attempts to verify this token using the given options.
// Returns a nil error if the token is valid.
func (t *Token) Verify(verifyOpts VerifyOptions) error {
// Verify that the Issuer claim is a trusted authority.
if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) {
log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer)
return ErrInvalidToken
}
// Verify that the Audience claim is allowed.
if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) {
log.Errorf("token intended for another audience: %q", t.Claims.Audience)
return ErrInvalidToken
}
// Verify that the token is currently usable and not expired.
currentUnixTime := time.Now().Unix()
if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) {
log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
return ErrInvalidToken
}
// Verify the token signature.
if len(t.Signature) == 0 {
log.Error("token has no signature")
return ErrInvalidToken
}
// Verify that the signing key is trusted.
signingKey, err := t.VerifySigningKey(verifyOpts)
if err != nil {
log.Error(err)
return ErrInvalidToken
}
// Finally, verify the signature of the token using the key which signed it.
if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
log.Errorf("unable to verify token signature: %s", err)
return ErrInvalidToken
}
return nil
}
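// Illustrative sketch: verifying a parsed token. The issuer and audience
// strings are placeholders; in this package the access controller builds
// VerifyOptions from its own configuration, as seen in Authorized above.
//
//	opts := VerifyOptions{
//		TrustedIssuers:    []string{"registry-token-issuer"},
//		AcceptedAudiences: []string{"token-service"},
//		Roots:             rootCertPool,  // *x509.CertPool of trusted CAs
//		TrustedKeys:       trustedKeyMap, // map[string]libtrust.PublicKey
//	}
//	if err := token.Verify(opts); err != nil {
//		// err is always ErrInvalidToken; the specific failure was logged.
//	}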
// VerifySigningKey attempts to get the key which was used to sign this token.
// The token header should contain one of these three fields:
// `x5c` - The x509 certificate chain for the signing key. Needs to be
// verified.
// `jwk` - The JSON Web Key representation of the signing key.
// May contain its own `x5c` field which needs to be verified.
// `kid` - The unique identifier for the key. This library interprets it
// as a libtrust fingerprint. The key itself can be looked up in
// the trustedKeys field of the given verify options.
// Each of these methods is tried in that order of preference until the
// signing key is found or an error is returned.
func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
// First attempt to get an x509 certificate chain from the header.
var (
x5c = t.Header.X5c
rawJWK = t.Header.RawJWK
keyID = t.Header.KeyID
)
switch {
case len(x5c) > 0:
signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots)
case rawJWK != nil:
signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts)
case len(keyID) > 0:
signingKey = verifyOpts.TrustedKeys[keyID]
if signingKey == nil {
err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID)
}
default:
err = errors.New("unable to get token signing key")
}
return
}
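// For illustration, hypothetical JOSE headers that would exercise each
// branch above (the values are placeholders, not real keys or certs):
//
//	{"typ": "JWT", "alg": "ES256", "x5c": ["MIIB...", "MIIC..."]} // cert chain
//	{"typ": "JWT", "alg": "RS256", "jwk": {"kty": "RSA", ...}}    // embedded JWK
//	{"typ": "JWT", "alg": "ES256", "kid": "ABCD:EFGH:..."}        // trusted-key lookup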
func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) {
if len(x5c) == 0 {
return nil, errors.New("empty x509 certificate chain")
}
// Ensure the first element is encoded correctly.
leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0])
if err != nil {
return nil, fmt.Errorf("unable to decode leaf certificate: %s", err)
}
// And that it is a valid x509 certificate.
leafCert, err := x509.ParseCertificate(leafCertDer)
if err != nil {
return nil, fmt.Errorf("unable to parse leaf certificate: %s", err)
}
// The rest of the certificate chain are intermediate certificates.
intermediates := x509.NewCertPool()
for i := 1; i < len(x5c); i++ {
intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i])
if err != nil {
return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err)
}
intermediateCert, err := x509.ParseCertificate(intermediateCertDer)
if err != nil {
return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err)
}
intermediates.AddCert(intermediateCert)
}
verifyOpts := x509.VerifyOptions{
Intermediates: intermediates,
Roots: roots,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
// TODO: this call returns certificate chains which we ignore for now, but
// we should check them for revocations if we have the ability later.
if _, err = leafCert.Verify(verifyOpts); err != nil {
return nil, fmt.Errorf("unable to verify certificate chain: %s", err)
}
// Get the public key from the leaf certificate.
leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey)
if !ok {
return nil, errors.New("unable to get leaf cert public key value")
}
leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey)
if err != nil {
return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err)
}
return
}
func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) {
pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK))
if err != nil {
return nil, fmt.Errorf("unable to decode raw JWK value: %s", err)
}
// Check to see if the key includes a certificate chain.
x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{})
if !ok {
// The JWK should be one of the trusted root keys.
if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted {
return nil, errors.New("untrusted JWK with no certificate chain")
}
// The JWK is one of the trusted keys.
return
}
// Ensure each item in the chain is of the correct type.
x5c := make([]string, len(x5cVal))
for i, val := range x5cVal {
certString, ok := val.(string)
if !ok || len(certString) == 0 {
return nil, errors.New("malformed certificate chain")
}
x5c[i] = certString
}
// Ensure that the x509 certificate chain can
// be verified up to one of our trusted roots.
leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots)
if err != nil {
return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err)
}
// Verify that the public key in the leaf cert *is* the signing key.
if pubKey.KeyID() != leafKey.KeyID() {
return nil, errors.New("leaf certificate public key ID does not match JWK key ID")
}
return
}
// accessSet returns a set of actions available for the resource
// actions listed in the `access` section of this token.
func (t *Token) accessSet() accessSet {
if t.Claims == nil {
return nil
}
accessSet := make(accessSet, len(t.Claims.Access))
for _, resourceActions := range t.Claims.Access {
resource := auth.Resource{
Type: resourceActions.Type,
Name: resourceActions.Name,
}
set, exists := accessSet[resource]
if !exists {
set = newActionSet()
accessSet[resource] = set
}
for _, action := range resourceActions.Actions {
set.add(action)
}
}
return accessSet
}
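// Illustrative sketch: how Authorized (above) consumes this set. The
// access item is a placeholder.
//
//	set := token.accessSet()
//	ok := set.contains(auth.Access{
//		Resource: auth.Resource{Type: "repository", Name: "library/ubuntu"},
//		Action:   "push",
//	})
//	// ok is true only when the token's claims grant "push" on that repository.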
func (t *Token) compactRaw() string {
return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature))
}

Some files were not shown because too many files have changed in this diff.