refactor(quota,middleware): implement size quota by quota interceptor

Signed-off-by: He Weiwei <hweiwei@vmware.com>
He Weiwei 2019-08-08 23:55:54 +00:00
parent f3ba25f656
commit e62c29123d
31 changed files with 2272 additions and 1573 deletions

View File

@ -23,6 +23,15 @@ CREATE TABLE blob
UNIQUE (digest)
);
/* add the table for project and blob */
CREATE TABLE project_blob (
id SERIAL PRIMARY KEY NOT NULL,
project_id int NOT NULL,
blob_id int NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
CONSTRAINT unique_project_blob UNIQUE (project_id, blob_id)
);
CREATE TABLE artifact
(
id SERIAL PRIMARY KEY NOT NULL,

View File

@ -26,6 +26,8 @@ import (
func AddArtifact(af *models.Artifact) (int64, error) {
now := time.Now()
af.CreationTime = now
af.PushTime = now
id, err := GetOrmer().Insert(af)
if err != nil {
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
@ -36,6 +38,12 @@ func AddArtifact(af *models.Artifact) (int64, error) {
return id, nil
}
// UpdateArtifact ...
func UpdateArtifact(af *models.Artifact) error {
_, err := GetOrmer().Update(af)
return err
}
// UpdateArtifactDigest ...
func UpdateArtifactDigest(af *models.Artifact) error {
_, err := GetOrmer().Update(af, "digest")

View File

@ -2,11 +2,11 @@ package dao
import (
"fmt"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"strings"
"time"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
)
// AddBlob ...
@ -23,6 +23,20 @@ func AddBlob(blob *models.Blob) (int64, error) {
return id, nil
}
// GetOrCreateBlob returns blob by digest, create it if not exists
func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) {
blob.CreationTime = time.Now()
created, id, err := GetOrmer().ReadOrCreate(blob, "digest")
if err != nil {
return false, nil, err
}
blob.ID = id
return created, blob, nil
}
// GetBlob ...
func GetBlob(digest string) (*models.Blob, error) {
o := GetOrmer()
@ -50,15 +64,73 @@ func DeleteBlob(digest string) error {
return err
}
// HasBlobInProject ...
func HasBlobInProject(projectID int64, digest string) (bool, error) {
var res []orm.Params
num, err := GetOrmer().Raw(`SELECT * FROM artifact af LEFT JOIN artifact_blob afnb ON af.digest = afnb.digest_af WHERE af.project_id = ? and afnb.digest_blob = ? `, projectID, digest).Values(&res)
if err != nil {
return false, err
// GetBlobsByArtifact returns the blobs referenced by the artifact with the given digest
func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) {
sql := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)`
var blobs []*models.Blob
if _, err := GetOrmer().Raw(sql, artifactDigest).QueryRows(&blobs); err != nil {
return nil, err
}
if num == 0 {
return false, nil
}
return true, nil
return blobs, nil
}
// GetExclusiveBlobs returns the blobs of the artifact repository:digest which are not shared with other artifacts in the project
func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) {
blobs, err := GetBlobsByArtifact(digest)
if err != nil {
return nil, err
}
sql := fmt.Sprintf(`
SELECT
DISTINCT b.digest_blob AS digest
FROM
(
SELECT
digest
FROM
artifact
WHERE
(
project_id = ?
AND repo != ?
)
OR (
project_id = ?
AND digest != ?
)
) AS a
LEFT JOIN artifact_blob b ON a.digest = b.digest_af
AND b.digest_blob IN (%s)`, paramPlaceholder(len(blobs)-1))
params := []interface{}{projectID, repository, projectID, digest}
for _, blob := range blobs {
if blob.Digest != digest {
params = append(params, blob.Digest)
}
}
var rows []struct {
Digest string
}
if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil {
return nil, err
}
shared := map[string]bool{}
for _, row := range rows {
shared[row.Digest] = true
}
var exclusive []*models.Blob
for _, blob := range blobs {
if blob.Digest != digest && !shared[blob.Digest] {
exclusive = append(exclusive, blob)
}
}
return exclusive, nil
}
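For orientation, a hedged usage sketch (not part of this commit) of how GetExclusiveBlobs can feed the size quota computation when a manifest is deleted; the helper name is hypothetical:
// Hypothetical helper, illustration only: sum the sizes of the blobs that
// would be freed if the artifact repository:manifestDigest were deleted.
func sizeFreedByManifestDeletion(projectID int64, repository, manifestDigest string) (int64, error) {
    exclusive, err := GetExclusiveBlobs(projectID, repository, manifestDigest)
    if err != nil {
        return 0, err
    }
    var total int64
    for _, blob := range exclusive {
        total += blob.Size
    }
    return total, nil
}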

View File

@ -15,10 +15,15 @@
package dao
import (
"strings"
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
"github.com/stretchr/testify/suite"
)
func TestAddBlob(t *testing.T) {
@ -64,42 +69,154 @@ func TestDeleteBlob(t *testing.T) {
require.Nil(t, err)
}
func TestHasBlobInProject(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "TestHasBlobInProject",
Tag: "latest",
Digest: "tttt",
Kind: "image",
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
afnb1 := &models.ArtifactAndBlob{
DigestAF: "tttt",
DigestBlob: "zzza",
}
afnb2 := &models.ArtifactAndBlob{
DigestAF: "tttt",
DigestBlob: "zzzb",
}
afnb3 := &models.ArtifactAndBlob{
DigestAF: "tttt",
DigestBlob: "zzzc",
func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) {
digest := digest.FromString(strings.Join(layerDigests, ":")).String()
artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag}
if _, err := AddArtifact(artifact); err != nil {
return "", err
}
var afnbs []*models.ArtifactAndBlob
afnbs = append(afnbs, afnb1)
afnbs = append(afnbs, afnb2)
afnbs = append(afnbs, afnb3)
// add
err = AddArtifactNBlobs(afnbs)
require.Nil(t, err)
blobDigests := append([]string{digest}, layerDigests...)
for _, blobDigest := range blobDigests {
blob := &models.Blob{Digest: blobDigest, Size: 1}
if _, _, err := GetOrCreateBlob(blob); err != nil {
return "", err
}
has, err := HasBlobInProject(1, "zzzb")
require.Nil(t, err)
assert.True(t, has)
afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest})
}
total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest})
if err != nil {
return "", err
}
if total == 1 {
if err := AddArtifactNBlobs(afnbs); err != nil {
return "", err
}
}
return digest, nil
}
func withProject(f func(int64, string)) {
projectName := utils.GenerateRandomString()
projectID, err := AddProject(models.Project{
Name: projectName,
OwnerID: 1,
})
if err != nil {
panic(err)
}
defer func() {
DeleteProject(projectID)
}()
f(projectID, projectName)
}
type GetExclusiveBlobsSuite struct {
suite.Suite
}
func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string {
digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...)
suite.Nil(err)
return digest
}
func (suite *GetExclusiveBlobsSuite) TestInSameRepository() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
digest3 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 2)
}
manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(digest3, blobs[0].Digest)
}
})
}
func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
digest3 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(digest3, blobs[0].Digest)
}
})
}
func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
withProject(func(id int64, name string) {
manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 2)
}
})
})
}
func TestRunGetExclusiveBlobsSuite(t *testing.T) {
suite.Run(t, new(GetExclusiveBlobsSuite))
}

View File

@ -0,0 +1,105 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"fmt"
"time"
"github.com/goharbor/harbor/src/common/models"
)
// AddBlobToProject ...
func AddBlobToProject(blobID, projectID int64) (int64, error) {
pb := &models.ProjectBlob{
BlobID: blobID,
ProjectID: projectID,
CreationTime: time.Now(),
}
_, id, err := GetOrmer().ReadOrCreate(pb, "blob_id", "project_id")
return id, err
}
// AddBlobsToProject ...
func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) {
if len(blobs) == 0 {
return 0, nil
}
now := time.Now()
var projectBlobs []*models.ProjectBlob
for _, blob := range blobs {
projectBlobs = append(projectBlobs, &models.ProjectBlob{
BlobID: blob.ID,
ProjectID: projectID,
CreationTime: now,
})
}
return GetOrmer().InsertMulti(len(projectBlobs), projectBlobs)
}
// RemoveBlobsFromProject ...
func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error {
var blobIDs []interface{}
for _, blob := range blobs {
blobIDs = append(blobIDs, blob.ID)
}
if len(blobIDs) == 0 {
return nil
}
sql := fmt.Sprintf(`DELETE FROM project_blob WHERE blob_id IN (%s)`, paramPlaceholder(len(blobIDs)))
_, err := GetOrmer().Raw(sql, blobIDs).Exec()
return err
}
// HasBlobInProject ...
func HasBlobInProject(projectID int64, digest string) (bool, error) {
sql := `SELECT COUNT(*) FROM project_blob JOIN blob ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?`
var count int64
if err := GetOrmer().Raw(sql, projectID, digest).QueryRow(&count); err != nil {
return false, err
}
return count > 0, nil
}
// GetBlobsNotInProject returns the blobs with the given digests that are not associated with the project
func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blob, error) {
if len(blobDigests) == 0 {
return nil, nil
}
sql := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) AND digest IN (%s)",
paramPlaceholder(len(blobDigests)))
params := []interface{}{projectID}
for _, digest := range blobDigests {
params = append(params, digest)
}
var blobs []*models.Blob
if _, err := GetOrmer().Raw(sql, params...).QueryRows(&blobs); err != nil {
return nil, err
}
return blobs, nil
}
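A minimal sketch (not from this change) of how these helpers are meant to compose when blobs land in a project: look up what is missing with GetBlobsNotInProject and attach it with AddBlobsToProject; the wrapper name is hypothetical:
// Hypothetical wrapper, illustration only: attach the given blob digests to
// the project if they are not already associated with it.
func ensureBlobsInProject(projectID int64, blobDigests ...string) error {
    missing, err := GetBlobsNotInProject(projectID, blobDigests...)
    if err != nil {
        return err
    }
    if len(missing) == 0 {
        return nil
    }
    _, err = AddBlobsToProject(projectID, missing...)
    return err
}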

View File

@ -0,0 +1,40 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestHasBlobInProject(t *testing.T) {
_, blob, err := GetOrCreateBlob(&models.Blob{
Digest: digest.FromString(utils.GenerateRandomString()).String(),
Size: 100,
})
require.Nil(t, err)
_, err = AddBlobToProject(blob.ID, 1)
require.Nil(t, err)
has, err := HasBlobInProject(1, blob.Digest)
require.Nil(t, err)
assert.True(t, has)
}

View File

@ -38,6 +38,7 @@ func init() {
new(Robot),
new(OIDCUser),
new(Blob),
new(ProjectBlob),
new(Artifact),
new(ArtifactAndBlob),
new(CVEWhitelist),

View File

@ -12,17 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package util
package models
import (
"net/http"
"time"
)
// RegInterceptor ...
type RegInterceptor interface {
// HandleRequest ...
HandleRequest(req *http.Request) error
// HandleResponse won't return any error
HandleResponse(rw CustomResponseWriter, req *http.Request)
// ProjectBlob holds the relationship between project and blob.
type ProjectBlob struct {
ID int64 `orm:"pk;auto;column(id)" json:"id"`
ProjectID int64 `orm:"column(project_id)" json:"project_id"`
BlobID int64 `orm:"column(blob_id)" json:"blob_id"`
CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
}
// TableName ...
func (*ProjectBlob) TableName() string {
return "project_blob"
}

View File

@ -15,12 +15,12 @@
package chart
import (
"fmt"
"net/http"
"regexp"
"strconv"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
"github.com/goharbor/harbor/src/core/middlewares/util"
@ -29,81 +29,81 @@ import (
var (
deleteChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/(?P<name>\w+)/(?P<version>[\w\d\.]+)/?$`)
uploadChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/?$`)
createChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/?$`)
)
var (
defaultBuilders = []interceptor.Builder{
&deleteChartVersionBuilder{},
&uploadChartVersionBuilder{},
&chartVersionDeletionBuilder{},
&chartVersionCreationBuilder{},
}
)
type deleteChartVersionBuilder struct {
}
type chartVersionDeletionBuilder struct{}
func (*deleteChartVersionBuilder) Build(req *http.Request) interceptor.Interceptor {
func (*chartVersionDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if req.Method != http.MethodDelete {
return nil
return nil, nil
}
matches := deleteChartVersionRe.FindStringSubmatch(req.URL.String())
if len(matches) <= 1 {
return nil
return nil, nil
}
namespace, chartName, version := matches[1], matches[2], matches[3]
project, err := dao.GetProjectByName(namespace)
if err != nil {
log.Errorf("Failed to get project %s, error: %v", namespace, err)
return nil
return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err)
}
if project == nil {
log.Warningf("Project %s not found", namespace)
return nil
return nil, fmt.Errorf("project %s not found", namespace)
}
info := &util.ChartVersionInfo{
ProjectID: project.ProjectID,
Namespace: namespace,
ChartName: chartName,
Version: version,
}
opts := []quota.Option{
quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
quota.WithAction(quota.SubtractAction),
quota.StatusCode(http.StatusOK),
quota.MutexKeys(mutexKey(namespace, chartName, version)),
quota.MutexKeys(info.MutexKey()),
quota.Resources(types.ResourceList{types.ResourceCount: 1}),
}
return quota.New(opts...)
return quota.New(opts...), nil
}
type uploadChartVersionBuilder struct {
}
type chartVersionCreationBuilder struct{}
func (*uploadChartVersionBuilder) Build(req *http.Request) interceptor.Interceptor {
func (*chartVersionCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if req.Method != http.MethodPost {
return nil
return nil, nil
}
matches := uploadChartVersionRe.FindStringSubmatch(req.URL.String())
matches := createChartVersionRe.FindStringSubmatch(req.URL.String())
if len(matches) <= 1 {
return nil
return nil, nil
}
namespace := matches[1]
project, err := dao.GetProjectByName(namespace)
if err != nil {
log.Errorf("Failed to get project %s, error: %v", namespace, err)
return nil
return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err)
}
if project == nil {
log.Warningf("Project %s not found", namespace)
return nil
return nil, fmt.Errorf("project %s not found", namespace)
}
chart, err := parseChart(req)
if err != nil {
log.Errorf("Failed to parse chart from body, error: %v", err)
return nil
return nil, fmt.Errorf("failed to parse chart from body, error: %v", err)
}
chartName, version := chart.Metadata.Name, chart.Metadata.Version
@ -120,9 +120,9 @@ func (*uploadChartVersionBuilder) Build(req *http.Request) interceptor.Intercept
quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated),
quota.MutexKeys(mutexKey(namespace, chartName, version)),
quota.OnResources(computeQuotaForUpload),
quota.MutexKeys(info.MutexKey()),
quota.OnResources(computeResourcesForChartVersionCreation),
}
return quota.New(opts...)
return quota.New(opts...), nil
}

View File

@ -42,7 +42,13 @@ func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
// ServeHTTP manifest ...
func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor := h.getInterceptor(req)
interceptor, err := h.getInterceptor(req)
if err != nil {
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in chart count quota handler: %v", err)),
http.StatusInternalServerError)
return
}
if interceptor == nil {
h.next.ServeHTTP(rw, req)
return
@ -61,13 +67,17 @@ func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor.HandleResponse(w, req)
}
func (h *chartHandler) getInterceptor(req *http.Request) interceptor.Interceptor {
func (h *chartHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
for _, builder := range h.builders {
interceptor := builder.Build(req)
interceptor, err := builder.Build(req)
if err != nil {
return nil, err
}
if interceptor != nil {
return interceptor
return interceptor, nil
}
}
return nil
return nil, nil
}

View File

@ -85,7 +85,9 @@ func chartVersionExists(namespace, chartName, version string) bool {
return !chartVersion.Removed
}
func computeQuotaForUpload(req *http.Request) (types.ResourceList, error) {
// computeResourcesForChartVersionCreation returns the count resource required for the chart package;
// no count is required if the chart version already exists in the project
func computeResourcesForChartVersionCreation(req *http.Request) (types.ResourceList, error) {
info, ok := util.ChartVersionInfoFromContext(req.Context())
if !ok {
return nil, errors.New("chart version info missing")
@ -99,10 +101,6 @@ func computeQuotaForUpload(req *http.Request) (types.ResourceList, error) {
return types.ResourceList{types.ResourceCount: 1}, nil
}
func mutexKey(str ...string) string {
return "chart:" + strings.Join(str, ":")
}
func parseChart(req *http.Request) (*chart.Chart, error) {
chartFile, _, err := req.FormFile(formFieldNameForChart)
if err != nil {

View File

@ -18,178 +18,80 @@ import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/opencontainers/go-digest"
)
var (
defaultBuilders = []interceptor.Builder{
&deleteManifestBuilder{},
&putManifestBuilder{},
&manifestDeletionBuilder{},
&manifestCreationBuilder{},
}
)
type deleteManifestBuilder struct {
}
type manifestDeletionBuilder struct{}
func (*deleteManifestBuilder) Build(req *http.Request) interceptor.Interceptor {
if req.Method != http.MethodDelete {
return nil
func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchDeleteManifest(req); !match {
return nil, nil
}
match, name, reference := util.MatchManifestURL(req)
if !match {
return nil
}
dgt, err := digest.Parse(reference)
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
var err error
info, err = util.ParseManifestInfoFromPath(req)
if err != nil {
// Delete manifest only accept digest as reference
return nil
return nil, fmt.Errorf("failed to parse manifest, error %v", err)
}
projectName := strings.Split(name, "/")[0]
project, err := dao.GetProjectByName(projectName)
if err != nil {
log.Errorf("Failed to get project %s, error: %v", projectName, err)
return nil
// Manifest info will be used by computeResourcesForDeleteManifest
*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
}
if project == nil {
log.Warningf("Project %s not found", projectName)
return nil
}
info := &util.MfInfo{
ProjectID: project.ProjectID,
Repository: name,
Digest: dgt.String(),
}
// Manifest info will be used by computeQuotaForUpload
*req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info))
opts := []quota.Option{
quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.SubtractAction),
quota.StatusCode(http.StatusAccepted),
quota.MutexKeys(mutexKey(info)),
quota.OnResources(computeQuotaForDelete),
quota.MutexKeys(info.MutexKey("count")),
quota.OnResources(computeResourcesForManifestDeletion),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
return dao.DeleteArtifactByDigest(info.ProjectID, info.Repository, info.Digest)
}),
}
return quota.New(opts...)
return quota.New(opts...), nil
}
type putManifestBuilder struct {
}
type manifestCreationBuilder struct{}
func (b *putManifestBuilder) Build(req *http.Request) interceptor.Interceptor {
if req.Method != http.MethodPut {
return nil
func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchPushManifest(req); !match {
return nil, nil
}
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
// assert that manifest info will be set by others
return nil
var err error
info, err = util.ParseManifestInfo(req)
if err != nil {
return nil, fmt.Errorf("failed to parse manifest, error %v", err)
}
// Manifest info will be used by computeResourcesForCreateManifest
*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
}
opts := []quota.Option{
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated),
quota.MutexKeys(mutexKey(info)),
quota.OnResources(computeQuotaForPut),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
newManifest, overwriteTag := !info.Exist, info.DigestChanged
if newManifest {
if err := b.doNewManifest(info); err != nil {
log.Errorf("Failed to handle response for new manifest, error: %v", err)
}
} else if overwriteTag {
if err := b.doOverwriteTag(info); err != nil {
log.Errorf("Failed to handle response for overwrite tag, error: %v", err)
}
quota.MutexKeys(info.MutexKey("count")),
quota.OnResources(computeResourcesForManifestCreation),
quota.OnFulfilled(afterManifestCreated),
}
return nil
}),
}
return quota.New(opts...)
}
func (b *putManifestBuilder) doNewManifest(info *util.MfInfo) error {
artifact := &models.Artifact{
PID: info.ProjectID,
Repo: info.Repository,
Tag: info.Tag,
Digest: info.Digest,
PushTime: time.Now(),
Kind: "Docker-Image",
}
if _, err := dao.AddArtifact(artifact); err != nil {
return fmt.Errorf("error to add artifact, %v", err)
}
return b.attachBlobsToArtifact(info)
}
func (b *putManifestBuilder) doOverwriteTag(info *util.MfInfo) error {
artifact := &models.Artifact{
ID: info.ArtifactID,
PID: info.ProjectID,
Repo: info.Repository,
Tag: info.Tag,
Digest: info.Digest,
PushTime: time.Now(),
Kind: "Docker-Image",
}
if err := dao.UpdateArtifactDigest(artifact); err != nil {
return fmt.Errorf("error to update artifact, %v", err)
}
return b.attachBlobsToArtifact(info)
}
func (b *putManifestBuilder) attachBlobsToArtifact(info *util.MfInfo) error {
self := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: info.Digest,
}
artifactBlobs := append([]*models.ArtifactAndBlob{}, self)
for _, d := range info.Refrerence {
artifactBlob := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: d.Digest.String(),
}
artifactBlobs = append(artifactBlobs, artifactBlob)
}
if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil {
if strings.Contains(err.Error(), dao.ErrDupRows.Error()) {
log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag")
return nil
}
return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err)
}
return nil
return quota.New(opts...), nil
}

View File

@ -42,7 +42,14 @@ func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
// ServeHTTP manifest ...
func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor := h.getInterceptor(req)
interceptor, err := h.getInterceptor(req)
if err != nil {
log.Warningf("Error occurred when to handle request in count quota handler: %v", err)
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in count quota handler: %v", err)),
http.StatusInternalServerError)
return
}
if interceptor == nil {
h.next.ServeHTTP(rw, req)
return
@ -60,13 +67,17 @@ func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request)
interceptor.HandleResponse(rw, req)
}
func (h *countQuotaHandler) getInterceptor(req *http.Request) interceptor.Interceptor {
func (h *countQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
for _, builder := range h.builders {
interceptor := builder.Build(req)
interceptor, err := builder.Build(req)
if err != nil {
return nil, err
}
if interceptor != nil {
return interceptor
return interceptor, nil
}
}
return nil
return nil, nil
}

View File

@ -67,7 +67,7 @@ func doDeleteManifestRequest(projectID int64, projectName, name, dgt string, nex
url := fmt.Sprintf("/v2/%s/manifests/%s", repository, dgt)
req, _ := http.NewRequest("DELETE", url, nil)
ctx := util.NewManifestInfoContext(req.Context(), &util.MfInfo{
ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{
ProjectID: projectID,
Repository: repository,
Digest: dgt,
@ -96,12 +96,12 @@ func doPutManifestRequest(projectID int64, projectName, name, tag, dgt string, n
url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag)
req, _ := http.NewRequest("PUT", url, nil)
ctx := util.NewManifestInfoContext(req.Context(), &util.MfInfo{
ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{
ProjectID: projectID,
Repository: repository,
Tag: tag,
Digest: dgt,
Refrerence: []distribution.Descriptor{
References: []distribution.Descriptor{
{Digest: digest.FromString(randomString(15))},
{Digest: digest.FromString(randomString(15))},
},
@ -146,11 +146,13 @@ func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) {
}
func (suite *HandlerSuite) TearDownTest() {
dao.ClearTable("artifact")
dao.ClearTable("blob")
dao.ClearTable("artifact_blob")
dao.ClearTable("quota")
dao.ClearTable("quota_usage")
for _, table := range []string{
"artifact", "blob",
"artifact_blob", "project_blob",
"quota", "quota_usage",
} {
dao.ClearTable(table)
}
}
func (suite *HandlerSuite) TestPutManifestCreated() {
@ -169,9 +171,6 @@ func (suite *HandlerSuite) TestPutManifestCreated() {
total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt})
suite.Nil(err)
suite.Equal(int64(1), total, "Artifact should be created")
if exists, err := dao.HasBlobInProject(projectID, dgt); suite.Nil(err) {
suite.True(exists)
}
// Push the photon:latest with photon:dev
code = doPutManifestRequest(projectID, projectName, "photon", "dev", dgt)
@ -213,9 +212,6 @@ func (suite *HandlerSuite) TestPutManifestFailed() {
total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt})
suite.Nil(err)
suite.Equal(int64(0), total, "Artifact should not be created")
if exists, err := dao.HasBlobInProject(projectID, dgt); suite.Nil(err) {
suite.False(exists)
}
}
func (suite *HandlerSuite) TestDeleteManifestAccepted() {
@ -258,7 +254,7 @@ func (suite *HandlerSuite) TestDeleteManifestFailed() {
suite.checkCountUsage(1, projectID)
}
func (suite *HandlerSuite) TestDeleteManifesInMultiProjects() {
func (suite *HandlerSuite) TestDeleteManifestInMultiProjects() {
projectName := randomString(5)
projectID := suite.addProject(projectName)

View File

@ -18,23 +18,35 @@ import (
"errors"
"fmt"
"net/http"
"strings"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/goharbor/harbor/src/pkg/types"
)
func mutexKey(info *util.MfInfo) string {
if info.Tag != "" {
return "Quota::manifest-lock::" + info.Repository + ":" + info.Tag
// computeResourcesForManifestCreation returns the count resource required for the manifest;
// no count is required if the repository:tag already exists in the project
func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
}
return "Quota::manifest-lock::" + info.Repository + ":" + info.Digest
// count quota is only required when pushing a new tag
if info.IsNewTag() {
return quota.ResourceList{quota.ResourceCount: 1}, nil
}
return nil, nil
}
func computeQuotaForDelete(req *http.Request) (types.ResourceList, error) {
// computeResourcesForManifestDeletion returns the count resource that will be released when the manifest is deleted;
// the result will be the total manifest count of the same repository in the project
func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
@ -53,40 +65,54 @@ func computeQuotaForDelete(req *http.Request) (types.ResourceList, error) {
return types.ResourceList{types.ResourceCount: total}, nil
}
func computeQuotaForPut(req *http.Request) (types.ResourceList, error) {
// afterManifestCreated is the handler invoked after the manifest has been created successfully;
// it creates or updates the artifact info in the DB and then attaches the blobs to the artifact
func afterManifestCreated(w http.ResponseWriter, req *http.Request) error {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
return errors.New("manifest info missing")
}
artifact, err := getArtifact(info)
if err != nil {
return nil, fmt.Errorf("error occurred when to check Manifest existence %v", err)
artifact := info.Artifact()
if artifact.ID == 0 {
if _, err := dao.AddArtifact(artifact); err != nil {
return fmt.Errorf("error to add artifact, %v", err)
}
} else {
if err := dao.UpdateArtifact(artifact); err != nil {
return fmt.Errorf("error to update artifact, %v", err)
}
}
if artifact != nil {
info.ArtifactID = artifact.ID
info.DigestChanged = artifact.Digest != info.Digest
info.Exist = true
return nil, nil
}
return quota.ResourceList{quota.ResourceCount: 1}, nil
return attachBlobsToArtifact(info)
}
// get artifact by manifest info
func getArtifact(info *util.MfInfo) (*models.Artifact, error) {
query := &models.ArtifactQuery{
PID: info.ProjectID,
Repo: info.Repository,
Tag: info.Tag,
// attachBlobsToArtifact attaches the blobs referenced by the manifest to the artifact
func attachBlobsToArtifact(info *util.ManifestInfo) error {
self := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: info.Digest,
}
artifacts, err := dao.ListArtifacts(query)
if err != nil || len(artifacts) == 0 {
return nil, err
artifactBlobs := append([]*models.ArtifactAndBlob{}, self)
for _, reference := range info.References {
artifactBlob := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: reference.Digest.String(),
}
return artifacts[0], nil
artifactBlobs = append(artifactBlobs, artifactBlob)
}
if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil {
if strings.Contains(err.Error(), dao.ErrDupRows.Error()) {
log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag")
return nil
}
return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err)
}
return nil
}

View File

@ -20,8 +20,9 @@ import (
// Builder interceptor builder
type Builder interface {
// Build build interceptor from http.Request returns nil if interceptor not match the request
Build(*http.Request) Interceptor
// Build builds an interceptor from the http.Request;
// (nil, nil) must be returned if the builder does not match the request
Build(*http.Request) (Interceptor, error)
}
// Interceptor interceptor for middleware
@ -32,3 +33,16 @@ type Interceptor interface {
// HandleResponse won't return any error
HandleResponse(http.ResponseWriter, *http.Request)
}
// ResponseInterceptorFunc ...
type ResponseInterceptorFunc func(w http.ResponseWriter, r *http.Request)
// HandleRequest no-op HandleRequest
func (f ResponseInterceptorFunc) HandleRequest(*http.Request) error {
return nil
}
// HandleResponse calls f(w, r).
func (f ResponseInterceptorFunc) HandleResponse(w http.ResponseWriter, r *http.Request) {
f(w, r)
}
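A minimal sketch of a Builder that follows the new contract, returning (nil, nil) when the request does not match and a ResponseInterceptorFunc otherwise; the builder itself is hypothetical and not part of this commit:
// Hypothetical builder, illustration only.
type headRequestBuilder struct{}

func (*headRequestBuilder) Build(req *http.Request) (Interceptor, error) {
    if req.Method != http.MethodHead {
        // (nil, nil): this builder does not handle the request.
        return nil, nil
    }
    // Nothing to do on the request side, only observe the response.
    return ResponseInterceptorFunc(func(w http.ResponseWriter, r *http.Request) {
        // response-side handling goes here
    }), nil
}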

View File

@ -65,8 +65,6 @@ func (qi *quotaInterceptor) HandleRequest(req *http.Request) (err error) {
if err != nil {
return fmt.Errorf("failed to compute the resources for quota, error: %v", err)
}
log.Debugf("Compute the resources for quota, got: %v", resources)
}
qi.resources = resources
@ -92,7 +90,9 @@ func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Requ
switch sr.Status() {
case opts.StatusCode:
if opts.OnFulfilled != nil {
opts.OnFulfilled(w, req)
if err := opts.OnFulfilled(w, req); err != nil {
log.Errorf("Failed to handle on fulfilled, error: %v", err)
}
}
default:
if err := qi.unreserve(); err != nil {
@ -100,12 +100,16 @@ func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Requ
}
if opts.OnRejected != nil {
opts.OnRejected(w, req)
if err := opts.OnRejected(w, req); err != nil {
log.Errorf("Failed to handle on rejected, error: %v", err)
}
}
}
if opts.OnFinally != nil {
opts.OnFinally(w, req)
if err := opts.OnFinally(w, req); err != nil {
log.Errorf("Failed to handle on finally, error: %v", err)
}
}
}
@ -118,8 +122,6 @@ func (qi *quotaInterceptor) freeMutexes() {
}
func (qi *quotaInterceptor) reserve() error {
log.Debugf("Reserve %s resources, %v", qi.opts.Action, qi.resources)
if len(qi.resources) == 0 {
return nil
}
@ -135,8 +137,6 @@ func (qi *quotaInterceptor) reserve() error {
}
func (qi *quotaInterceptor) unreserve() error {
log.Debugf("Unreserve %s resources, %v", qi.opts.Action, qi.resources)
if len(qi.resources) == 0 {
return nil
}

View File

@ -0,0 +1,208 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"fmt"
"net/http"
"strconv"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
"github.com/goharbor/harbor/src/core/middlewares/util"
)
var (
defaultBuilders = []interceptor.Builder{
&blobStreamUploadBuilder{},
&blobStorageQuotaBuilder{},
&manifestCreationBuilder{},
&manifestDeletionBuilder{},
}
)
// blobStreamUploadBuilder interceptor builder for PATCH /v2/<name>/blobs/uploads/<uuid>
type blobStreamUploadBuilder struct{}
func (*blobStreamUploadBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if !match(req, http.MethodPatch, blobUploadURLRe) {
return nil, nil
}
s := blobUploadURLRe.FindStringSubmatch(req.URL.Path)
uuid := s[2]
onResponse := func(w http.ResponseWriter, req *http.Request) {
size, err := parseUploadedBlobSize(w)
if err != nil {
log.Errorf("failed to parse uploaded blob size for upload %s", uuid)
return
}
ok, err := setUploadedBlobSize(uuid, size)
if err != nil {
log.Errorf("failed to update blob update size for upload %s, error: %v", uuid, err)
return
}
if !ok {
// ToDo discuss what to do here.
log.Errorf("fail to set bunk: %s size: %d in redis, it causes unable to set correct quota for the artifact", uuid, size)
}
}
return interceptor.ResponseInterceptorFunc(onResponse), nil
}
// blobStorageQuotaBuilder interceptor builder for these requests
// PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
type blobStorageQuotaBuilder struct{}
func (*blobStorageQuotaBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
parseBlobInfo := getBlobInfoParser(req)
if parseBlobInfo == nil {
return nil, nil
}
info, err := parseBlobInfo(req)
if err != nil {
return nil, err
}
// replace req with blob info context
*req = *(req.WithContext(util.NewBlobInfoContext(req.Context(), info)))
opts := []quota.Option{
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated), // NOTICE: mount blob and blob upload complete both return 201 when success
quota.OnResources(computeResourcesForBlob),
quota.MutexKeys(info.MutexKey()),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
return syncBlobInfoToProject(info)
}),
}
return quota.New(opts...), nil
}
// manifestCreationBuilder interceptor builder for the request PUT /v2/<name>/manifests/<reference>
type manifestCreationBuilder struct{}
func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchPushManifest(req); !match {
return nil, nil
}
info, err := util.ParseManifestInfo(req)
if err != nil {
return nil, err
}
// Replace request with manifests info context
*req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info))
opts := []quota.Option{
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated),
quota.OnResources(computeResourcesForManifestCreation),
quota.MutexKeys(info.MutexKey("size")),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
// the manifest was created; sync the manifest itself as a blob into the blob and project_blob tables
blobInfo, err := parseBlobInfoFromManifest(req)
if err != nil {
return err
}
if err := syncBlobInfoToProject(blobInfo); err != nil {
return err
}
// sync the blobs of the manifest which are not yet in the project into the project_blob table
blobs, err := info.GetBlobsNotInProject()
if err != nil {
return err
}
_, err = dao.AddBlobsToProject(info.ProjectID, blobs...)
return err
}),
}
return quota.New(opts...), nil
}
// manifestDeletionBuilder interceptor builder for the request DELETE /v2/<name>/manifests/<reference>
type manifestDeletionBuilder struct{}
func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchDeleteManifest(req); !match {
return nil, nil
}
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
var err error
info, err = util.ParseManifestInfoFromPath(req)
if err != nil {
return nil, fmt.Errorf("failed to parse manifest, error %v", err)
}
// Manifest info will be used by computeResourcesForDeleteManifest
*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
}
blobs, err := dao.GetBlobsByArtifact(info.Digest)
if err != nil {
return nil, fmt.Errorf("failed to query blobs of %s, error: %v", info.Digest, err)
}
mutexKeys := []string{info.MutexKey("size")}
for _, blob := range blobs {
mutexKeys = append(mutexKeys, info.BlobMutexKey(blob))
}
opts := []quota.Option{
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.SubtractAction),
quota.StatusCode(http.StatusAccepted),
quota.OnResources(computeResourcesForManifestDeletion),
quota.MutexKeys(mutexKeys...),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
blobs := info.ExclusiveBlobs
total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{
PID: info.ProjectID,
Digest: info.Digest,
})
if err == nil && total > 0 {
blob, err := dao.GetBlob(info.Digest)
if err == nil {
blobs = append(blobs, blob)
}
}
return dao.RemoveBlobsFromProject(info.ProjectID, blobs...)
}),
}
return quota.New(opts...), nil
}
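For orientation, a hedged sketch of the quota.Option pattern these builders share: pick a manager and action, the success status code, a resource computation, and an OnFulfilled hook. The function and the fixed 1-byte resource are hypothetical, and the types package import is assumed:
// Hypothetical builder body, illustration only: reserve a fixed amount of
// storage for a matching request and confirm it once the response succeeds.
func buildFixedStorageInterceptor(projectID int64) interceptor.Interceptor {
    opts := []quota.Option{
        quota.WithManager("project", strconv.FormatInt(projectID, 10)),
        quota.WithAction(quota.AddAction),
        quota.StatusCode(http.StatusCreated),
        quota.OnResources(func(*http.Request) (types.ResourceList, error) {
            return types.ResourceList{types.ResourceStorage: 1}, nil
        }),
        quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
            // persist bookkeeping here, e.g. sync the blob to the project
            return nil
        }),
    }
    return quota.New(opts...)
}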

View File

@ -15,217 +15,68 @@
package sizequota
import (
"errors"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
common_util "github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
common_redis "github.com/goharbor/harbor/src/common/utils/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"net/http"
"strings"
"time"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/util"
)
type sizeQuotaHandler struct {
builders []interceptor.Builder
next http.Handler
}
// New ...
func New(next http.Handler) http.Handler {
func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
if len(builders) == 0 {
builders = defaultBuilders
}
return &sizeQuotaHandler{
builders: builders,
next: next,
}
}
// ServeHTTP ...
func (sqh *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
sizeInteceptor := getInteceptor(req)
if sizeInteceptor == nil {
sqh.next.ServeHTTP(rw, req)
func (h *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor, err := h.getInterceptor(req)
if err != nil {
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)),
http.StatusInternalServerError)
return
}
// handler request
if err := sizeInteceptor.HandleRequest(req); err != nil {
if interceptor == nil {
h.next.ServeHTTP(rw, req)
return
}
if err := interceptor.HandleRequest(req); err != nil {
log.Warningf("Error occurred when to handle request in size quota handler: %v", err)
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)),
http.StatusInternalServerError)
return
}
sqh.next.ServeHTTP(rw, req)
// handler response
sizeInteceptor.HandleResponse(*rw.(*util.CustomResponseWriter), req)
h.next.ServeHTTP(rw, req)
interceptor.HandleResponse(rw, req)
}
func getInteceptor(req *http.Request) util.RegInterceptor {
// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
matchMountBlob, repository, mount, _ := util.MatchMountBlobURL(req)
if matchMountBlob {
bb := util.BlobInfo{}
bb.Repository = repository
bb.Digest = mount
return NewMountBlobInterceptor(&bb)
}
// PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
matchPutBlob, repository := util.MatchPutBlobURL(req)
if matchPutBlob {
bb := util.BlobInfo{}
bb.Repository = repository
return NewPutBlobInterceptor(&bb)
}
// PUT /v2/<name>/manifests/<reference>
matchPushMF, repository, tag := util.MatchPushManifest(req)
if matchPushMF {
bb := util.BlobInfo{}
mfInfo := util.MfInfo{}
bb.Repository = repository
mfInfo.Repository = repository
mfInfo.Tag = tag
return NewPutManifestInterceptor(&bb, &mfInfo)
}
// PATCH /v2/<name>/blobs/uploads/<uuid>
matchPatchBlob, _ := util.MatchPatchBlobURL(req)
if matchPatchBlob {
return NewPatchBlobInterceptor()
}
return nil
}
func requireQuota(conn redis.Conn, blobInfo *util.BlobInfo) error {
projectID, err := util.GetProjectID(strings.Split(blobInfo.Repository, "/")[0])
if err != nil {
return err
}
blobInfo.ProjectID = projectID
digestLock, err := tryLockBlob(conn, blobInfo)
if err != nil {
log.Infof("failed to lock digest in redis, %v", err)
return err
}
blobInfo.DigestLock = digestLock
blobExist, err := dao.HasBlobInProject(blobInfo.ProjectID, blobInfo.Digest)
if err != nil {
tryFreeBlob(blobInfo)
return err
}
blobInfo.Exist = blobExist
if blobExist {
return nil
}
// only require quota for non existing blob.
quotaRes := &quota.ResourceList{
quota.ResourceStorage: blobInfo.Size,
}
err = util.TryRequireQuota(blobInfo.ProjectID, quotaRes)
if err != nil {
log.Infof("project id, %d, size %d", blobInfo.ProjectID, blobInfo.Size)
tryFreeBlob(blobInfo)
log.Errorf("cannot get quota for the blob %v", err)
return err
}
blobInfo.Quota = quotaRes
return nil
}
// HandleBlobCommon handles put blob complete request
// 1, add blob into DB if success
// 2, roll back resource if failure.
func HandleBlobCommon(rw util.CustomResponseWriter, req *http.Request) error {
bbInfo := req.Context().Value(util.BBInfokKey)
bb, ok := bbInfo.(*util.BlobInfo)
if !ok {
return errors.New("failed to convert blob information context into BBInfo")
}
defer func() {
_, err := bb.DigestLock.Free()
if err != nil {
log.Errorf("Error to unlock blob digest:%s in response handler, %v", bb.Digest, err)
}
if err := bb.DigestLock.Conn.Close(); err != nil {
log.Errorf("Error to close redis connection in put blob response handler, %v", err)
}
}()
// Do nothing for a existing blob.
if bb.Exist {
return nil
}
if rw.Status() == http.StatusCreated {
blob := &models.Blob{
Digest: bb.Digest,
ContentType: bb.ContentType,
Size: bb.Size,
CreationTime: time.Now(),
}
_, err := dao.AddBlob(blob)
if err != nil {
return err
}
} else if rw.Status() >= 300 && rw.Status() <= 511 {
success := util.TryFreeQuota(bb.ProjectID, bb.Quota)
if !success {
return fmt.Errorf("Error to release resource booked for the blob, %d, digest: %s ", bb.ProjectID, bb.Digest)
}
}
return nil
}
// tryLockBlob locks blob with redis ...
func tryLockBlob(conn redis.Conn, blobInfo *util.BlobInfo) (*common_redis.Mutex, error) {
// Quota::blob-lock::projectname::digest
digestLock := common_redis.New(conn, "Quota::blob-lock::"+strings.Split(blobInfo.Repository, "/")[0]+":"+blobInfo.Digest, common_util.GenerateRandomString())
success, err := digestLock.Require()
func (h *sizeQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
for _, builder := range h.builders {
interceptor, err := builder.Build(req)
if err != nil {
return nil, err
}
if !success {
return nil, fmt.Errorf("unable to lock digest: %s, %s ", blobInfo.Repository, blobInfo.Digest)
}
return digestLock, nil
}
func tryFreeBlob(blobInfo *util.BlobInfo) {
_, err := blobInfo.DigestLock.Free()
if err != nil {
log.Warningf("Error to unlock digest: %s,%s with error: %v ", blobInfo.Repository, blobInfo.Digest, err)
if interceptor != nil {
return interceptor, nil
}
}
}
func rmBlobUploadUUID(conn redis.Conn, UUID string) (bool, error) {
exists, err := redis.Int(conn.Do("EXISTS", UUID))
if err != nil {
return false, err
}
if exists == 1 {
res, err := redis.Int(conn.Do("DEL", UUID))
if err != nil {
return false, err
}
return res == 1, nil
}
return true, nil
}
// put blob path: /v2/<name>/blobs/uploads/<uuid>
func getUUID(path string) string {
if !strings.Contains(path, "/") {
log.Infof("it's not a valid path string: %s", path)
return ""
}
strs := strings.Split(path, "/")
return strs[len(strs)-1]
return nil, nil
}

View File

@ -15,163 +15,661 @@
package sizequota
import (
"context"
"bytes"
"encoding/json"
"fmt"
"github.com/garyburd/redigo/redis"
utilstest "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"strconv"
"sync"
"testing"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/core/middlewares/countquota"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/suite"
)
const testingRedisHost = "REDIS_HOST"
func init() {
rand.Seed(time.Now().UnixNano())
}
func genUUID() string {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
return ""
}
return fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
func getProjectCountUsage(projectID int64) (int64, error) {
usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)}
err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
if err != nil {
return 0, err
}
used, err := types.NewResourceList(usage.Used)
if err != nil {
return 0, err
}
return used[types.ResourceCount], nil
}
func getProjectStorageUsage(projectID int64) (int64, error) {
usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)}
err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
if err != nil {
return 0, err
}
used, err := types.NewResourceList(usage.Used)
if err != nil {
return 0, err
}
return used[types.ResourceStorage], nil
}
func randomString(n int) string {
const letterBytes = "abcdefghijklmnopqrstuvwxyz"
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return string(b)
}
func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest {
manifest := schema2.Manifest{
Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest},
Config: distribution.Descriptor{
MediaType: schema2.MediaTypeImageConfig,
Size: configSize,
Digest: digest.FromString(randomString(15)),
},
}
for _, size := range layerSizes {
manifest.Layers = append(manifest.Layers, distribution.Descriptor{
MediaType: schema2.MediaTypeLayer,
Size: size,
Digest: digest.FromString(randomString(15)),
})
}
return manifest
}
func manifestWithAdditionalLayers(raw schema2.Manifest, layerSizes []int64) schema2.Manifest {
var manifest schema2.Manifest
manifest.Versioned = raw.Versioned
manifest.Config = raw.Config
manifest.Layers = append(manifest.Layers, raw.Layers...)
for _, size := range layerSizes {
manifest.Layers = append(manifest.Layers, distribution.Descriptor{
MediaType: schema2.MediaTypeLayer,
Size: size,
Digest: digest.FromString(randomString(15)),
})
}
return manifest
}
func digestOfManifest(manifest schema2.Manifest) string {
bytes, _ := json.Marshal(manifest)
return digest.FromBytes(bytes).String()
}
func sizeOfManifest(manifest schema2.Manifest) int64 {
bytes, _ := json.Marshal(manifest)
return int64(len(bytes))
}
func sizeOfImage(manifest schema2.Manifest) int64 {
totalSizeOfLayers := manifest.Config.Size
for _, layer := range manifest.Layers {
totalSizeOfLayers += layer.Size
}
return sizeOfManifest(manifest) + totalSizeOfLayers
}
func doHandle(req *http.Request, next ...http.HandlerFunc) int {
rr := httptest.NewRecorder()
var n http.HandlerFunc
if len(next) > 0 {
n = next[0]
} else {
n = func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusCreated)
}
}
h := New(http.HandlerFunc(n))
h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
return rr.Code
}
func patchBlobUpload(projectName, name, uuid, blobDigest string, chunkSize int64) {
repository := fmt.Sprintf("%s/%s", projectName, name)
url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest)
req, _ := http.NewRequest(http.MethodPatch, url, nil)
doHandle(req, func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusAccepted)
w.Header().Add("Range", fmt.Sprintf("0-%d", chunkSize-1))
})
}
func putBlobUpload(projectName, name, uuid, blobDigest string, blobSize ...int64) {
repository := fmt.Sprintf("%s/%s", projectName, name)
url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest)
req, _ := http.NewRequest(http.MethodPut, url, nil)
if len(blobSize) > 0 {
req.Header.Add("Content-Length", strconv.FormatInt(blobSize[0], 10))
}
doHandle(req, func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusCreated)
})
}
func mountBlob(projectName, name, blobDigest, fromRepository string) {
repository := fmt.Sprintf("%s/%s", projectName, name)
url := fmt.Sprintf("/v2/%s/blobs/uploads/?mount=%s&from=%s", repository, blobDigest, fromRepository)
req, _ := http.NewRequest(http.MethodPost, url, nil)
doHandle(req, func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusCreated)
})
}
func deleteManifest(projectName, name, digest string, accepted ...func() bool) {
repository := fmt.Sprintf("%s/%s", projectName, name)
url := fmt.Sprintf("/v2/%s/manifests/%s", repository, digest)
req, _ := http.NewRequest(http.MethodDelete, url, nil)
next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if len(accepted) > 0 {
if accepted[0]() {
w.WriteHeader(http.StatusAccepted)
} else {
w.WriteHeader(http.StatusNotFound)
}
return
}
w.WriteHeader(http.StatusAccepted)
}))
rr := httptest.NewRecorder()
h := New(next)
h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
}
func putManifest(projectName, name, tag string, manifest schema2.Manifest) {
repository := fmt.Sprintf("%s/%s", projectName, name)
buf, _ := json.Marshal(manifest)
url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag)
req, _ := http.NewRequest(http.MethodPut, url, bytes.NewReader(buf))
req.Header.Add("Content-Type", manifest.MediaType)
next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusCreated)
}))
rr := httptest.NewRecorder()
h := New(next)
h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
}
func pushImage(projectName, name, tag string, manifest schema2.Manifest) {
putBlobUpload(projectName, name, genUUID(), manifest.Config.Digest.String(), manifest.Config.Size)
for _, layer := range manifest.Layers {
putBlobUpload(projectName, name, genUUID(), layer.Digest.String(), layer.Size)
}
putManifest(projectName, name, tag, manifest)
}
func withProject(f func(int64, string)) {
projectName := randomString(5)
projectID, err := dao.AddProject(models.Project{
Name: projectName,
OwnerID: 1,
})
if err != nil {
panic(err)
}
defer func() {
dao.DeleteProject(projectID)
}()
f(projectID, projectName)
}
type HandlerSuite struct {
suite.Suite
}
func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) {
count, err := getProjectCountUsage(projectID)
suite.Nil(err, fmt.Sprintf("Failed to get count usage of project %d, error: %v", projectID, err))
suite.Equal(expected, count, "Failed to check count usage for project %d", projectID)
}
func (suite *HandlerSuite) checkStorageUsage(expected, projectID int64) {
value, err := getProjectStorageUsage(projectID)
suite.Nil(err, fmt.Sprintf("Failed to get storage usage of project %d, error: %v", projectID, err))
suite.Equal(expected, value, "Failed to check storage usage for project %d", projectID)
}
func (suite *HandlerSuite) TearDownTest() {
for _, table := range []string{
"artifact", "blob",
"artifact_blob", "project_blob",
"quota", "quota_usage",
} {
dao.ClearTable(table)
}
}
func (suite *HandlerSuite) TestPatchBlobUpload() {
withProject(func(projectID int64, projectName string) {
uuid := genUUID()
blobDigest := digest.FromString(randomString(15)).String()
patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
size, err := getUploadedBlobSize(uuid)
suite.Nil(err)
suite.Equal(int64(1024), size)
})
}
func (suite *HandlerSuite) TestPutBlobUpload() {
withProject(func(projectID int64, projectName string) {
uuid := genUUID()
blobDigest := digest.FromString(randomString(15)).String()
putBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
suite.checkStorageUsage(1024, projectID)
blob, err := dao.GetBlob(blobDigest)
suite.Nil(err)
suite.Equal(int64(1024), blob.Size)
})
}
func (suite *HandlerSuite) TestPutBlobUploadWithPatch() {
withProject(func(projectID int64, projectName string) {
uuid := genUUID()
blobDigest := digest.FromString(randomString(15)).String()
patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
putBlobUpload(projectName, "photon", uuid, blobDigest)
suite.checkStorageUsage(1024, projectID)
blob, err := dao.GetBlob(blobDigest)
suite.Nil(err)
suite.Equal(int64(1024), blob.Size)
})
}
func (suite *HandlerSuite) TestMountBlob() {
withProject(func(projectID int64, projectName string) {
blobDigest := digest.FromString(randomString(15)).String()
putBlobUpload(projectName, "photon", genUUID(), blobDigest, 1024)
suite.checkStorageUsage(1024, projectID)
repository := fmt.Sprintf("%s/%s", projectName, "photon")
withProject(func(projectID int64, projectName string) {
mountBlob(projectName, "harbor", blobDigest, repository)
suite.checkStorageUsage(1024, projectID)
})
})
}
func (suite *HandlerSuite) TestPutManifestCreated() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(100, []int64{100, 100})
putBlobUpload(projectName, "photon", genUUID(), manifest.Config.Digest.String(), manifest.Config.Size)
for _, layer := range manifest.Layers {
putBlobUpload(projectName, "photon", genUUID(), layer.Digest.String(), layer.Size)
}
putManifest(projectName, "photon", "latest", manifest)
suite.checkStorageUsage(int64(300+sizeOfManifest(manifest)), projectID)
})
}
func (suite *HandlerSuite) TestDeleteManifest() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "photon", "latest", manifest)
suite.checkStorageUsage(size, projectID)
deleteManifest(projectName, "photon", digestOfManifest(manifest))
suite.checkStorageUsage(0, projectID)
})
}
func (suite *HandlerSuite) TestImageOverwrite() {
withProject(func(projectID int64, projectName string) {
manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
size1 := sizeOfImage(manifest1)
pushImage(projectName, "photon", "latest", manifest1)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size1, projectID)
manifest2 := makeManifest(1, []int64{2, 3, 4, 5})
size2 := sizeOfImage(manifest2)
pushImage(projectName, "photon", "latest", manifest2)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size1+size2, projectID)
manifest3 := makeManifest(1, []int64{2, 3, 4, 5})
size3 := sizeOfImage(manifest3)
pushImage(projectName, "photon", "latest", manifest3)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size1+size2+size3, projectID)
})
}
func (suite *HandlerSuite) TestPushImageMultiTimes() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "photon", "latest", manifest)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "photon", "latest", manifest)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "photon", "latest", manifest)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size, projectID)
})
}
func (suite *HandlerSuite) TestPushImageToSameRepository() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "photon", "latest", manifest)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "photon", "dev", manifest)
suite.checkCountUsage(2, projectID)
suite.checkStorageUsage(size, projectID)
})
}
func (suite *HandlerSuite) TestPushImageToDifferentRepositories() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "mysql", "latest", manifest)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "redis", "latest", manifest)
suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
pushImage(projectName, "postgres", "latest", manifest)
suite.checkStorageUsage(size+2*sizeOfManifest(manifest), projectID)
})
}
func (suite *HandlerSuite) TestPushImageToDifferentProjects() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "mysql", "latest", manifest)
suite.checkStorageUsage(size, projectID)
withProject(func(id int64, name string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(name, "mysql", "latest", manifest)
suite.checkStorageUsage(size, id)
suite.checkStorageUsage(size, projectID)
})
})
}
func (suite *HandlerSuite) TestDeleteManifestShareLayersInSameRepository() {
withProject(func(projectID int64, projectName string) {
manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
size1 := sizeOfImage(manifest1)
pushImage(projectName, "mysql", "latest", manifest1)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size1, projectID)
manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7})
pushImage(projectName, "mysql", "dev", manifest2)
suite.checkCountUsage(2, projectID)
totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7
suite.checkStorageUsage(totalSize, projectID)
deleteManifest(projectName, "mysql", digestOfManifest(manifest1))
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID)
})
}
func (suite *HandlerSuite) TestDeleteManifestShareLayersInDifferentRepositories() {
withProject(func(projectID int64, projectName string) {
manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
size1 := sizeOfImage(manifest1)
pushImage(projectName, "mysql", "latest", manifest1)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size1, projectID)
pushImage(projectName, "mysql", "dev", manifest1)
suite.checkCountUsage(2, projectID)
suite.checkStorageUsage(size1, projectID)
manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7})
pushImage(projectName, "mariadb", "latest", manifest2)
suite.checkCountUsage(3, projectID)
totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7
suite.checkStorageUsage(totalSize, projectID)
deleteManifest(projectName, "mysql", digestOfManifest(manifest1))
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID)
})
}
func (suite *HandlerSuite) TestDeleteManifestInSameRepository() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "photon", "latest", manifest)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "photon", "dev", manifest)
suite.checkCountUsage(2, projectID)
suite.checkStorageUsage(size, projectID)
deleteManifest(projectName, "photon", digestOfManifest(manifest))
suite.checkCountUsage(0, projectID)
suite.checkStorageUsage(0, projectID)
})
}
func (suite *HandlerSuite) TestDeleteManifestInDifferentRepositories() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "mysql", "latest", manifest)
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "mysql", "5.6", manifest)
suite.checkCountUsage(2, projectID)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "redis", "latest", manifest)
suite.checkCountUsage(3, projectID)
suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
deleteManifest(projectName, "redis", digestOfManifest(manifest))
suite.checkCountUsage(2, projectID)
suite.checkStorageUsage(size, projectID)
pushImage(projectName, "redis", "latest", manifest)
suite.checkCountUsage(3, projectID)
suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
})
}
func (suite *HandlerSuite) TestDeleteManifestInDifferentProjects() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "mysql", "latest", manifest)
suite.checkStorageUsage(size, projectID)
withProject(func(id int64, name string) {
pushImage(name, "mysql", "latest", manifest)
suite.checkStorageUsage(size, id)
suite.checkStorageUsage(size, projectID)
deleteManifest(projectName, "mysql", digestOfManifest(manifest))
suite.checkCountUsage(0, projectID)
suite.checkStorageUsage(0, projectID)
})
})
}
func (suite *HandlerSuite) TestPushDeletePush() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
pushImage(projectName, "photon", "latest", manifest)
suite.checkStorageUsage(size, projectID)
deleteManifest(projectName, "photon", digestOfManifest(manifest))
suite.checkStorageUsage(0, projectID)
pushImage(projectName, "photon", "latest", manifest)
suite.checkStorageUsage(size, projectID)
})
}
func (suite *HandlerSuite) TestPushImageRace() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
size := sizeOfImage(manifest)
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
pushImage(projectName, "photon", "latest", manifest)
}()
}
wg.Wait()
suite.checkCountUsage(1, projectID)
suite.checkStorageUsage(size, projectID)
})
}
func (suite *HandlerSuite) TestDeleteImageRace() {
withProject(func(projectID int64, projectName string) {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
pushImage(projectName, "photon", "latest", manifest)
count := 100
size := sizeOfImage(manifest)
for i := 0; i < count; i++ {
manifest := makeManifest(1, []int64{2, 3, 4, 5})
pushImage(projectName, "mysql", fmt.Sprintf("tag%d", i), manifest)
size += sizeOfImage(manifest)
}
suite.checkCountUsage(int64(count+1), projectID)
suite.checkStorageUsage(size, projectID)
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
deleteManifest(projectName, "photon", digestOfManifest(manifest), func() bool {
return i == 0
})
}(i)
}
wg.Wait()
suite.checkCountUsage(int64(count), projectID)
suite.checkStorageUsage(size-sizeOfImage(manifest), projectID)
})
}
func TestMain(m *testing.M) {
	dao.PrepareTestForPostgresSQL()
	if result := m.Run(); result != 0 {
		os.Exit(result)
	}
}
func TestGetInteceptor(t *testing.T) {
assert := assert.New(t)
req1, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
res1 := getInteceptor(req1)
_, ok := res1.(*PutManifestInterceptor)
assert.True(ok)
req2, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/TestGetInteceptor/14.04", nil)
res2 := getInteceptor(req2)
assert.Nil(res2)
}
func TestRequireQuota(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
assert := assert.New(t)
blobInfo := &util.BlobInfo{
Repository: "library/test",
Digest: "sha256:abcdf123sdfefeg1246",
}
err = requireQuota(con, blobInfo)
assert.Nil(err)
}
func TestGetUUID(t *testing.T) {
str1 := "test/1/2/uuid-1"
uuid1 := getUUID(str1)
assert.Equal(t, uuid1, "uuid-1")
// not a valid path, just return empty
str2 := "test-1-2-uuid-2"
uuid2 := getUUID(str2)
assert.Equal(t, uuid2, "")
}
func TestAddRmUUID(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
rmfail, err := rmBlobUploadUUID(con, "test-rm-uuid")
assert.Nil(t, err)
assert.True(t, rmfail)
success, err := util.SetBunkSize(con, "test-rm-uuid", 1000)
assert.Nil(t, err)
assert.True(t, success)
rmSuccess, err := rmBlobUploadUUID(con, "test-rm-uuid")
assert.Nil(t, err)
assert.True(t, rmSuccess)
}
func TestTryFreeLockBlob(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
blobInfo := util.BlobInfo{
Repository: "lock/test",
Digest: "sha256:abcdf123sdfefeg1246",
}
lock, err := tryLockBlob(con, &blobInfo)
assert.Nil(t, err)
blobInfo.DigestLock = lock
tryFreeBlob(&blobInfo)
}
func TestBlobCommon(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
blobInfo := util.BlobInfo{
Repository: "TestBlobCommon/test",
Digest: "sha256:abcdf12345678sdfefeg1246",
ContentType: "ContentType",
Size: 101,
Exist: false,
}
rw := httptest.NewRecorder()
customResW := util.CustomResponseWriter{ResponseWriter: rw}
customResW.WriteHeader(201)
lock, err := tryLockBlob(con, &blobInfo)
assert.Nil(t, err)
blobInfo.DigestLock = lock
*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
err = HandleBlobCommon(customResW, req)
assert.Nil(t, err)
}
func getRedisHost() string {
redisHost := os.Getenv(testingRedisHost)
if redisHost == "" {
redisHost = "127.0.0.1" // for local test
}
return redisHost
}
func TestRunHandlerSuite(t *testing.T) {
suite.Run(t, new(HandlerSuite))
}

View File

@ -1,69 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"net/http"
"strings"
)
// MountBlobInterceptor ...
type MountBlobInterceptor struct {
blobInfo *util.BlobInfo
}
// NewMountBlobInterceptor ...
func NewMountBlobInterceptor(blobInfo *util.BlobInfo) *MountBlobInterceptor {
return &MountBlobInterceptor{
blobInfo: blobInfo,
}
}
// HandleRequest ...
func (mbi *MountBlobInterceptor) HandleRequest(req *http.Request) error {
tProjectID, err := util.GetProjectID(strings.Split(mbi.blobInfo.Repository, "/")[0])
if err != nil {
return fmt.Errorf("error occurred when to get target project: %d, %v", tProjectID, err)
}
blob, err := dao.GetBlob(mbi.blobInfo.Digest)
if err != nil {
return err
}
if blob == nil {
return fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", mbi.blobInfo.Digest)
}
mbi.blobInfo.Size = blob.Size
con, err := util.GetRegRedisCon()
if err != nil {
return err
}
if err := requireQuota(con, mbi.blobInfo); err != nil {
return err
}
*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, mbi.blobInfo)))
return nil
}
// HandleResponse ...
func (mbi *MountBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
if err := HandleBlobCommon(rw, req); err != nil {
log.Error(err)
}
}

View File

@ -1,85 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestNewMountBlobInterceptor(t *testing.T) {
blobinfo := &util.BlobInfo{}
blobinfo.Repository = "TestNewMountBlobInterceptor/latest"
bi := NewMountBlobInterceptor(blobinfo)
assert.NotNil(t, bi)
}
func TestMountBlobHandleRequest(t *testing.T) {
blobInfo := util.BlobInfo{
Repository: "TestHandleRequest/test",
Digest: "sha256:TestHandleRequest1234",
ContentType: "ContentType",
Size: 101,
Exist: false,
}
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
bi := NewMountBlobInterceptor(&blobInfo)
assert.NotNil(t, bi.HandleRequest(req))
}
func TestMountBlobHandleResponse(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
blobInfo := util.BlobInfo{
Repository: "TestHandleResponse/test",
Digest: "sha256:TestHandleResponseabcdf12345678sdfefeg1246",
ContentType: "ContentType",
Size: 101,
Exist: false,
}
rw := httptest.NewRecorder()
customResW := util.CustomResponseWriter{ResponseWriter: rw}
customResW.WriteHeader(201)
lock, err := tryLockBlob(con, &blobInfo)
assert.Nil(t, err)
blobInfo.DigestLock = lock
*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
bi := NewMountBlobInterceptor(&blobInfo)
assert.NotNil(t, bi)
bi.HandleResponse(customResW, req)
}

View File

@ -1,86 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"net/http"
"strconv"
"strings"
)
// PatchBlobInterceptor ...
type PatchBlobInterceptor struct {
}
// NewPatchBlobInterceptor ...
func NewPatchBlobInterceptor() *PatchBlobInterceptor {
return &PatchBlobInterceptor{}
}
// HandleRequest does nothing for patch blob, just lets the request go to the proxy.
func (pbi *PatchBlobInterceptor) HandleRequest(req *http.Request) error {
return nil
}
// HandleResponse records the upload progress from the Range attribute and stores it in redis with the UUID as the key
func (pbi *PatchBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
if rw.Status() != http.StatusAccepted {
return
}
con, err := util.GetRegRedisCon()
if err != nil {
log.Error(err)
return
}
defer con.Close()
uuid := rw.Header().Get("Docker-Upload-UUID")
if uuid == "" {
log.Errorf("no UUID in the patch blob response, the request path %s ", req.URL.Path)
return
}
// Range: Range indicating the current progress of the upload.
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload
patchRange := rw.Header().Get("Range")
if patchRange == "" {
log.Errorf("no Range in the patch blob response, the request path %s ", req.URL.Path)
return
}
endRange := strings.Split(patchRange, "-")[1]
size, err := strconv.ParseInt(endRange, 10, 64)
// docker registry did '-1' in the response
if size > 0 {
size = size + 1
}
if err != nil {
log.Error(err)
return
}
success, err := util.SetBunkSize(con, uuid, size)
if err != nil {
log.Error(err)
return
}
if !success {
// ToDo discuss what to do here.
log.Warningf(" T_T: Fail to set bunk: %s size: %d in redis, it causes unable to set correct quota for the artifact.", uuid, size)
}
return
}

View File

@ -1,42 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
)
func TestNewPatchBlobInterceptor(t *testing.T) {
bi := NewPatchBlobInterceptor()
assert.NotNil(t, bi)
}
func TestPatchBlobHandleRequest(t *testing.T) {
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
bi := NewPatchBlobInterceptor()
assert.Nil(t, bi.HandleRequest(req))
}
func TestPatchBlobHandleResponse(t *testing.T) {
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
rw := httptest.NewRecorder()
customResW := util.CustomResponseWriter{ResponseWriter: rw}
customResW.WriteHeader(400)
NewPatchBlobInterceptor().HandleResponse(customResW, req)
}

View File

@ -1,83 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"errors"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/opencontainers/go-digest"
"net/http"
)
// PutBlobInterceptor ...
type PutBlobInterceptor struct {
blobInfo *util.BlobInfo
}
// NewPutBlobInterceptor ...
func NewPutBlobInterceptor(blobInfo *util.BlobInfo) *PutBlobInterceptor {
return &PutBlobInterceptor{
blobInfo: blobInfo,
}
}
// HandleRequest ...
func (pbi *PutBlobInterceptor) HandleRequest(req *http.Request) error {
// the redis connection will be closed in the put response.
con, err := util.GetRegRedisCon()
if err != nil {
return err
}
defer func() {
if pbi.blobInfo.UUID != "" {
_, err := rmBlobUploadUUID(con, pbi.blobInfo.UUID)
if err != nil {
log.Warningf("error occurred when remove UUID for blob, %v", err)
}
}
}()
dgstStr := req.FormValue("digest")
if dgstStr == "" {
return errors.New("blob digest missing")
}
dgst, err := digest.Parse(dgstStr)
if err != nil {
return errors.New("blob digest parsing failed")
}
pbi.blobInfo.Digest = dgst.String()
pbi.blobInfo.UUID = getUUID(req.URL.Path)
size, err := util.GetBlobSize(con, pbi.blobInfo.UUID)
if err != nil {
return err
}
pbi.blobInfo.Size = size
if err := requireQuota(con, pbi.blobInfo); err != nil {
return err
}
*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, pbi.blobInfo)))
return nil
}
// HandleResponse ...
func (pbi *PutBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
if err := HandleBlobCommon(rw, req); err != nil {
log.Error(err)
}
}

View File

@ -1,80 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestNewPutBlobInterceptor(t *testing.T) {
blobinfo := &util.BlobInfo{}
blobinfo.Repository = "TestNewPutBlobInterceptor/latest"
bi := NewPutBlobInterceptor(blobinfo)
assert.NotNil(t, bi)
}
func TestPutBlobHandleRequest(t *testing.T) {
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
blobinfo := &util.BlobInfo{}
blobinfo.Repository = "TestPutBlobHandleRequest/latest"
bi := NewPutBlobInterceptor(blobinfo)
assert.NotNil(t, bi.HandleRequest(req))
}
func TestPutBlobHandleResponse(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
blobInfo := util.BlobInfo{
Repository: "TestPutBlobHandleResponse/test",
Digest: "sha256:TestPutBlobHandleResponseabcdf12345678sdfefeg1246",
ContentType: "ContentType",
Size: 101,
Exist: false,
}
rw := httptest.NewRecorder()
customResW := util.CustomResponseWriter{ResponseWriter: rw}
customResW.WriteHeader(201)
lock, err := tryLockBlob(con, &blobInfo)
assert.Nil(t, err)
blobInfo.DigestLock = lock
*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
bi := NewPutBlobInterceptor(&blobInfo)
assert.NotNil(t, bi)
bi.HandleResponse(customResW, req)
}

View File

@ -1,102 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"bytes"
"context"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"io/ioutil"
"net/http"
"strings"
)
// PutManifestInterceptor ...
type PutManifestInterceptor struct {
blobInfo *util.BlobInfo
mfInfo *util.MfInfo
}
// NewPutManifestInterceptor ...
func NewPutManifestInterceptor(blobInfo *util.BlobInfo, mfInfo *util.MfInfo) *PutManifestInterceptor {
return &PutManifestInterceptor{
blobInfo: blobInfo,
mfInfo: mfInfo,
}
}
// HandleRequest ...
func (pmi *PutManifestInterceptor) HandleRequest(req *http.Request) error {
mediaType := req.Header.Get("Content-Type")
if mediaType == schema1.MediaTypeManifest ||
mediaType == schema1.MediaTypeSignedManifest ||
mediaType == schema2.MediaTypeManifest {
con, err := util.GetRegRedisCon()
if err != nil {
log.Infof("failed to get registry redis connection, %v", err)
return err
}
data, err := ioutil.ReadAll(req.Body)
if err != nil {
log.Warningf("Error occurred when to copy manifest body %v", err)
return err
}
req.Body = ioutil.NopCloser(bytes.NewBuffer(data))
manifest, desc, err := distribution.UnmarshalManifest(mediaType, data)
if err != nil {
log.Warningf("Error occurred when to Unmarshal Manifest %v", err)
return err
}
projectID, err := util.GetProjectID(strings.Split(pmi.mfInfo.Repository, "/")[0])
if err != nil {
log.Warningf("Error occurred when to get project ID %v", err)
return err
}
pmi.mfInfo.ProjectID = projectID
pmi.mfInfo.Refrerence = manifest.References()
pmi.mfInfo.Digest = desc.Digest.String()
pmi.blobInfo.ProjectID = projectID
pmi.blobInfo.Digest = desc.Digest.String()
pmi.blobInfo.Size = desc.Size
pmi.blobInfo.ContentType = mediaType
if err := requireQuota(con, pmi.blobInfo); err != nil {
return err
}
*req = *(req.WithContext(context.WithValue(req.Context(), util.MFInfokKey, pmi.mfInfo)))
*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, pmi.blobInfo)))
return nil
}
return fmt.Errorf("unsupported content type for manifest: %s", mediaType)
}
// HandleResponse ...
func (pmi *PutManifestInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
if err := HandleBlobCommon(rw, req); err != nil {
log.Error(err)
return
}
}

View File

@ -1,92 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestNewPutManifestInterceptor(t *testing.T) {
blobinfo := &util.BlobInfo{}
blobinfo.Repository = "TestNewPutManifestInterceptor/latest"
mfinfo := &util.MfInfo{
Repository: "TestNewPutManifestInterceptor",
}
mi := NewPutManifestInterceptor(blobinfo, mfinfo)
assert.NotNil(t, mi)
}
func TestPutManifestHandleRequest(t *testing.T) {
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
blobinfo := &util.BlobInfo{}
blobinfo.Repository = "TestPutManifestHandleRequest/latest"
mfinfo := &util.MfInfo{
Repository: "TestPutManifestHandleRequest",
}
mi := NewPutManifestInterceptor(blobinfo, mfinfo)
assert.NotNil(t, mi.HandleRequest(req))
}
func TestPutManifestHandleResponse(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
blobInfo := util.BlobInfo{
Repository: "TestPutManifestandleResponse/test",
Digest: "sha256:TestPutManifestandleResponseabcdf12345678sdfefeg1246",
ContentType: "ContentType",
Size: 101,
Exist: false,
}
mfinfo := util.MfInfo{
Repository: "TestPutManifestandleResponse",
}
rw := httptest.NewRecorder()
customResW := util.CustomResponseWriter{ResponseWriter: rw}
customResW.WriteHeader(201)
lock, err := tryLockBlob(con, &blobInfo)
assert.Nil(t, err)
blobInfo.DigestLock = lock
*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
bi := NewPutManifestInterceptor(&blobInfo, &mfinfo)
assert.NotNil(t, bi)
bi.HandleResponse(customResW, req)
}

View File

@ -0,0 +1,330 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"errors"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/opencontainers/go-digest"
)
var (
blobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/([a-zA-Z0-9-_.=]+)/?$`)
initiateBlobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/?$`)
)
// parseUploadedBlobSize parses the blob stream upload response and returns the size of the uploaded blob
func parseUploadedBlobSize(w http.ResponseWriter) (int64, error) {
// Range: Range indicating the current progress of the upload.
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload
r := w.Header().Get("Range")
end := strings.Split(r, "-")[1]
size, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return 0, err
}
// the docker registry reports the range end as size-1, so add 1 back
if size > 0 {
size = size + 1
}
return size, nil
}
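// Worked example (hypothetical values): a registry response carrying
// "Range: 0-1023" means bytes 0..1023 have been received, so the function
// returns 1023+1 = 1024.
//
//	rw := httptest.NewRecorder()
//	rw.Header().Set("Range", "0-1023")
//	size, _ := parseUploadedBlobSize(rw) // size == 1024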
// setUploadedBlobSize updates the size of the stream upload blob
func setUploadedBlobSize(uuid string, size int64) (bool, error) {
conn, err := util.GetRegRedisCon()
if err != nil {
return false, err
}
defer conn.Close()
key := fmt.Sprintf("upload:%s:size", uuid)
reply, err := redis.String(conn.Do("SET", key, size))
if err != nil {
return false, err
}
return reply == "OK", nil
}
// getUploadedBlobSize returns the size of the stream upload blob
func getUploadedBlobSize(uuid string) (int64, error) {
conn, err := util.GetRegRedisCon()
if err != nil {
return 0, err
}
defer conn.Close()
key := fmt.Sprintf("upload:%s:size", uuid)
size, err := redis.Int64(conn.Do("GET", key))
if err != nil {
return 0, err
}
return size, nil
}
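// The two helpers above share one key layout in the registry redis: the size
// of upload session <uuid> is stored under "upload:<uuid>:size". For example
// (hypothetical uuid), a PATCH that uploaded 1024 bytes results in
//
//	SET upload:u-1:size 1024
//
// and the later blob upload complete request reads it back with
//
//	GET upload:u-1:size   -> 1024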
// parseBlobSize returns blob size from blob upload complete request
func parseBlobSize(req *http.Request, uuid string) (int64, error) {
size, err := strconv.ParseInt(req.Header.Get("Content-Length"), 10, 64)
if err == nil && size != 0 {
return size, nil
}
return getUploadedBlobSize(uuid)
}
// match returns true if the request method equals method and the request path matches re
func match(req *http.Request, method string, re *regexp.Regexp) bool {
return req.Method == method && re.MatchString(req.URL.Path)
}
// parseBlobInfoFromComplete returns blob info from blob upload complete request
func parseBlobInfoFromComplete(req *http.Request) (*util.BlobInfo, error) {
if !match(req, http.MethodPut, blobUploadURLRe) {
return nil, fmt.Errorf("not match url %s for blob upload complete", req.URL.Path)
}
s := blobUploadURLRe.FindStringSubmatch(req.URL.Path)
repository, uuid := s[1][:len(s[1])-1], s[2]
projectName, _ := utils.ParseRepository(repository)
project, err := dao.GetProjectByName(projectName)
if err != nil {
return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
}
if project == nil {
return nil, fmt.Errorf("project %s not found", projectName)
}
dgt, err := digest.Parse(req.FormValue("digest"))
if err != nil {
return nil, fmt.Errorf("blob digest invalid for upload %s", uuid)
}
size, err := parseBlobSize(req, uuid)
if err != nil {
return nil, fmt.Errorf("failed to get content length of blob upload %s, error: %v", uuid, err)
}
return &util.BlobInfo{
ProjectID: project.ProjectID,
Repository: repository,
Digest: dgt.String(),
Size: size,
}, nil
}
// parseBlobInfoFromManifest returns blob info from the put manifest request
func parseBlobInfoFromManifest(req *http.Request) (*util.BlobInfo, error) {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
manifest, err := util.ParseManifestInfo(req)
if err != nil {
return nil, err
}
info = manifest
// replace the request with manifest info
*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
}
return &util.BlobInfo{
ProjectID: info.ProjectID,
Repository: info.Repository,
Digest: info.Descriptor.Digest.String(),
Size: info.Descriptor.Size,
ContentType: info.Descriptor.MediaType,
}, nil
}
// parseBlobInfoFromMount returns blob info from blob mount request
func parseBlobInfoFromMount(req *http.Request) (*util.BlobInfo, error) {
if !match(req, http.MethodPost, initiateBlobUploadURLRe) {
return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path)
}
if req.FormValue("mount") == "" || req.FormValue("from") == "" {
return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path)
}
dgt, err := digest.Parse(req.FormValue("mount"))
if err != nil {
return nil, errors.New("mount must be digest")
}
s := initiateBlobUploadURLRe.FindStringSubmatch(req.URL.Path)
repository := strings.TrimSuffix(s[1], "/")
projectName, _ := utils.ParseRepository(repository)
project, err := dao.GetProjectByName(projectName)
if err != nil {
return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
}
if project == nil {
return nil, fmt.Errorf("project %s not found", projectName)
}
blob, err := dao.GetBlob(dgt.String())
if err != nil {
return nil, fmt.Errorf("failed to get blob %s, error: %v", dgt.String(), err)
}
if blob == nil {
return nil, fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", dgt.String())
}
return &util.BlobInfo{
ProjectID: project.ProjectID,
Repository: repository,
Digest: dgt.String(),
Size: blob.Size,
}, nil
}
// getBlobInfoParser returns the blob info parser function for the request
// returns parseBlobInfoFromComplete when request match PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
// returns parseBlobInfoFromMount when request match POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
func getBlobInfoParser(req *http.Request) func(*http.Request) (*util.BlobInfo, error) {
if match(req, http.MethodPut, blobUploadURLRe) {
if req.FormValue("digest") != "" {
return parseBlobInfoFromComplete
}
}
if match(req, http.MethodPost, initiateBlobUploadURLRe) {
if req.FormValue("mount") != "" && req.FormValue("from") != "" {
return parseBlobInfoFromMount
}
}
return nil
}
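// Routing examples (hypothetical requests): the parser is chosen purely from
// the method, path and query string of the incoming registry request.
//
//	PUT  /v2/library/photon/blobs/uploads/u-1?digest=sha256:abc              -> parseBlobInfoFromComplete
//	POST /v2/library/photon/blobs/uploads/?mount=sha256:abc&from=library/foo -> parseBlobInfoFromMount
//	GET  /v2/library/photon/manifests/latest                                 -> nil (no blob info here)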
// computeResourcesForBlob returns the storage required for the blob; no storage is required if the blob already exists in the project
func computeResourcesForBlob(req *http.Request) (types.ResourceList, error) {
info, ok := util.BlobInfoFromContext(req.Context())
if !ok {
return nil, errors.New("blob info missing")
}
exist, err := info.BlobExists()
if err != nil {
return nil, err
}
if exist {
return nil, nil
}
return types.ResourceList{types.ResourceStorage: info.Size}, nil
}
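// For example (hypothetical size), completing the upload of a 1024-byte blob
// that is not yet in the project requires ResourceStorage = 1024; a blob the
// project already holds requires nothing.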
// computeResourcesForManifestCreation returns the storage resource required for the manifest.
// No storage is required if the manifest already exists in the project;
// otherwise the result is the size of the manifest itself plus the sizes of its blobs that are not yet in the project.
func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
}
exist, err := info.ManifestExists()
if err != nil {
return nil, err
}
// manifest exists in the project, so no storage quota is required
if exist {
return nil, nil
}
blobs, err := info.GetBlobsNotInProject()
if err != nil {
return nil, err
}
size := info.Descriptor.Size
for _, blob := range blobs {
size += blob.Size
}
return types.ResourceList{types.ResourceStorage: size}, nil
}
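// Worked example (hypothetical sizes): pushing a 500-byte manifest whose
// config and layers (1024 and 2048 bytes) are not yet in the project requires
// ResourceStorage = 500 + 1024 + 2048 = 3572; if the manifest digest already
// exists in the project, nil is returned and no extra quota is requested.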
// computeResourcesForManifestDeletion returns the storage resource that will be released when the manifest is deleted.
// The result is the sum of the manifest itself and the blobs that are not used by other manifests in the project.
func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
}
blobs, err := dao.GetExclusiveBlobs(info.ProjectID, info.Repository, info.Digest)
if err != nil {
return nil, err
}
info.ExclusiveBlobs = blobs
blob, err := dao.GetBlob(info.Digest)
if err != nil {
return nil, err
}
// manifest size will always be released
size := blob.Size
for _, blob := range blobs {
size = size + blob.Size
}
return types.ResourceList{types.ResourceStorage: size}, nil
}
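// Worked example (hypothetical sizes): deleting a 500-byte manifest that
// exclusively owns blobs of 1024 and 2048 bytes releases
// ResourceStorage = 500 + 1024 + 2048 = 3572; blobs still referenced by other
// manifests in the project are excluded from the sum.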
// syncBlobInfoToProject creates the blob and adds it to the project
func syncBlobInfoToProject(info *util.BlobInfo) error {
_, blob, err := dao.GetOrCreateBlob(&models.Blob{
Digest: info.Digest,
ContentType: info.ContentType,
Size: info.Size,
CreationTime: time.Now(),
})
if err != nil {
return err
}
if _, err := dao.AddBlobToProject(blob.ID, info.ProjectID); err != nil {
return err
}
return nil
}

View File

@ -15,51 +15,49 @@
package util
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/clair"
"github.com/goharbor/harbor/src/common/utils/log"
common_redis "github.com/goharbor/harbor/src/common/utils/redis"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/promgr"
"github.com/goharbor/harbor/src/pkg/scan/whitelist"
"github.com/opencontainers/go-digest"
)
type contextKey string
// ErrRequireQuota ...
var ErrRequireQuota = errors.New("cannot get quota on project for request")
const (
manifestURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`
blobURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/`
chartVersionInfoKey = contextKey("ChartVersionInfo")
// ImageInfoCtxKey the context key for image information
ImageInfoCtxKey = contextKey("ImageInfo")
// TokenUsername ...
// TODO: temp solution, remove after vmware/harbor#2242 is resolved.
TokenUsername = "harbor-core"
// MFInfokKey the context key for image tag redis lock
MFInfokKey = contextKey("ManifestInfo")
// BBInfokKey the context key for image tag redis lock
BBInfokKey = contextKey("BlobInfo")
// blobInfoKey the context key for blob info
blobInfoKey = contextKey("BlobInfo")
// chartVersionInfoKey the context key for chart version info
chartVersionInfoKey = contextKey("ChartVersionInfo")
// manifestInfoKey the context key for manifest info
manifestInfoKey = contextKey("ManifestInfo")
// DialConnectionTimeout ...
DialConnectionTimeout = 30 * time.Second
@ -69,6 +67,10 @@ const (
DialWriteTimeout = 10 * time.Second
)
var (
manifestURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`)
)
// ChartVersionInfo ...
type ChartVersionInfo struct {
ProjectID int64
@ -77,6 +79,13 @@ type ChartVersionInfo struct {
Version string
}
// MutexKey returns mutex key of the chart version
func (info *ChartVersionInfo) MutexKey(suffix ...string) string {
a := []string{"quota", info.Namespace, "chart", info.ChartName, "version", info.Version}
return strings.Join(append(a, suffix...), ":")
}
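// For example (hypothetical values), namespace "library", chart "nginx" and
// version "1.0.0" lock on the key "quota:library:chart:nginx:version:1.0.0".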
// ImageInfo ...
type ImageInfo struct {
Repository string
@ -87,46 +96,147 @@ type ImageInfo struct {
// BlobInfo ...
type BlobInfo struct {
UUID string
ProjectID int64
ContentType string
Size int64
Repository string
Tag string
// Exist indicates whether the manifest already exists in the DB. If false, it's a new image being uploaded.
Exist bool
Digest string
DigestLock *common_redis.Mutex
// Quota is the resource applied for the manifest upload request.
Quota *quota.ResourceList
blobExist bool
blobExistErr error
blobExistOnce sync.Once
}
// MfInfo ...
type MfInfo struct {
// BlobExists returns true when blob exists in the project
func (info *BlobInfo) BlobExists() (bool, error) {
info.blobExistOnce.Do(func() {
info.blobExist, info.blobExistErr = dao.HasBlobInProject(info.ProjectID, info.Digest)
})
return info.blobExist, info.blobExistErr
}
// MutexKey returns mutex key of the blob
func (info *BlobInfo) MutexKey(suffix ...string) string {
projectName, _ := utils.ParseRepository(info.Repository)
a := []string{"quota", projectName, "blob", info.Digest}
return strings.Join(append(a, suffix...), ":")
}
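// For example (hypothetical digest), a blob pushed to "library/photon" with
// digest "sha256:abc" locks on "quota:library:blob:sha256:abc".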
// ManifestInfo ...
type ManifestInfo struct {
// basic information of a manifest
ProjectID int64
Repository string
Tag string
Digest string
// Exist indicates whether the manifest already exists in the DB. If false, it's a new image being uploaded.
Exist bool
References []distribution.Descriptor
Descriptor distribution.Descriptor
// ArtifactID is the ID of the artifact which query by repository and tag
ArtifactID int64
// manifestExist indicates whether the manifest exists in the DB, looked up by (repository, digest)
manifestExist bool
manifestExistErr error
manifestExistOnce sync.Once
// DigestChanged true means the manifest exists but digest is changed.
// Probably it's a new image with existing repo/tag name or overwrite.
DigestChanged bool
// artifact the artifact indexed by (repository, tag) in DB
artifact *models.Artifact
artifactErr error
artifactOnce sync.Once
// used to block multiple push on same image.
TagLock *common_redis.Mutex
Refrerence []distribution.Descriptor
// ExclusiveBlobs includes the blobs that belong to this manifest only
// and excludes the blobs that are shared with other manifests in the same repo (project/repository).
ExclusiveBlobs []*models.Blob
}
// Quota is the resource applied for the manifest upload request.
Quota *quota.ResourceList
// MutexKey returns mutex key of the manifest
func (info *ManifestInfo) MutexKey(suffix ...string) string {
projectName, _ := utils.ParseRepository(info.Repository)
var a []string
if info.Tag != "" {
// a non-empty tag happens in PUT /v2/<name>/manifests/<reference>;
// lock by tag to compute the count resource required by quota
a = []string{"quota", projectName, "manifest", info.Tag}
} else {
a = []string{"quota", projectName, "manifest", info.Digest}
}
return strings.Join(append(a, suffix...), ":")
}
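// For example (hypothetical values), PUT /v2/library/photon/manifests/latest
// locks on "quota:library:manifest:latest", while a delete by digest locks on
// "quota:library:manifest:sha256:abc".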
// BlobMutexKey returns mutex key of the blob in manifest
func (info *ManifestInfo) BlobMutexKey(blob *models.Blob, suffix ...string) string {
projectName, _ := utils.ParseRepository(info.Repository)
a := []string{"quota", projectName, "blob", blob.Digest}
return strings.Join(append(a, suffix...), ":")
}
// GetBlobsNotInProject returns the blobs of the manifest which are not in the project
func (info *ManifestInfo) GetBlobsNotInProject() ([]*models.Blob, error) {
var digests []string
for _, reference := range info.References {
digests = append(digests, reference.Digest.String())
}
blobs, err := dao.GetBlobsNotInProject(info.ProjectID, digests...)
if err != nil {
return nil, err
}
return blobs, nil
}
func (info *ManifestInfo) fetchArtifact() (*models.Artifact, error) {
info.artifactOnce.Do(func() {
info.artifact, info.artifactErr = dao.GetArtifact(info.Repository, info.Tag)
})
return info.artifact, info.artifactErr
}
// IsNewTag returns true if the tag of the manifest does not exist in the project
func (info *ManifestInfo) IsNewTag() bool {
artifact, _ := info.fetchArtifact()
return artifact == nil
}
// Artifact returns artifact of the manifest
func (info *ManifestInfo) Artifact() *models.Artifact {
result := &models.Artifact{
PID: info.ProjectID,
Repo: info.Repository,
Tag: info.Tag,
Digest: info.Digest,
Kind: "Docker-Image",
}
if artifact, _ := info.fetchArtifact(); artifact != nil {
result.ID = artifact.ID
result.CreationTime = artifact.CreationTime
result.PushTime = time.Now()
}
return result
}
// ManifestExists returns true if the manifest exists in the repository
func (info *ManifestInfo) ManifestExists() (bool, error) {
info.manifestExistOnce.Do(func() {
total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{
PID: info.ProjectID,
Repo: info.Repository,
Digest: info.Digest,
})
info.manifestExist = total > 0
info.manifestExistErr = err
})
return info.manifestExist, info.manifestExistErr
}
// JSONError wraps a concrete Code and Message, it's readable for the docker daemon.
@ -156,12 +266,7 @@ func MarshalError(code, msg string) string {
// MatchManifestURL ...
func MatchManifestURL(req *http.Request) (bool, string, string) {
re, err := regexp.Compile(manifestURLPattern)
if err != nil {
log.Errorf("error to match manifest url, %v", err)
return false, "", ""
}
s := re.FindStringSubmatch(req.URL.Path)
s := manifestURLRe.FindStringSubmatch(req.URL.Path)
if len(s) == 3 {
s[1] = strings.TrimSuffix(s[1], "/")
return true, s[1], s[2]
@ -169,42 +274,6 @@ func MatchManifestURL(req *http.Request) (bool, string, string) {
return false, "", ""
}
// MatchPutBlobURL ...
func MatchPutBlobURL(req *http.Request) (bool, string) {
if req.Method != http.MethodPut {
return false, ""
}
re, err := regexp.Compile(blobURLPattern)
if err != nil {
log.Errorf("error to match put blob url, %v", err)
return false, ""
}
s := re.FindStringSubmatch(req.URL.Path)
if len(s) == 2 {
s[1] = strings.TrimSuffix(s[1], "/")
return true, s[1]
}
return false, ""
}
// MatchPatchBlobURL ...
func MatchPatchBlobURL(req *http.Request) (bool, string) {
if req.Method != http.MethodPatch {
return false, ""
}
re, err := regexp.Compile(blobURLPattern)
if err != nil {
log.Errorf("error to match put blob url, %v", err)
return false, ""
}
s := re.FindStringSubmatch(req.URL.Path)
if len(s) == 2 {
s[1] = strings.TrimSuffix(s[1], "/")
return true, s[1]
}
return false, ""
}
// MatchPullManifest checks if the request looks like a request to pull a manifest. If it is, it returns the image and the tag/sha256 digest as the 2nd and 3rd return values
func MatchPullManifest(req *http.Request) (bool, string, string) {
if req.Method != http.MethodGet {
@ -221,31 +290,21 @@ func MatchPushManifest(req *http.Request) (bool, string, string) {
return MatchManifestURL(req)
}
// MatchMountBlobURL POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
// If match, will return repo, mount and from as the 2nd, 3rd and 4th.
func MatchMountBlobURL(req *http.Request) (bool, string, string, string) {
if req.Method != http.MethodPost {
return false, "", "", ""
// MatchDeleteManifest checks if the request deletes a manifest; if so, it returns the repository and the reference (digest)
func MatchDeleteManifest(req *http.Request) (match bool, repository string, reference string) {
if req.Method != http.MethodDelete {
return
}
re, err := regexp.Compile(blobURLPattern)
if err != nil {
log.Errorf("error to match post blob url, %v", err)
return false, "", "", ""
match, repository, reference = MatchManifestURL(req)
if _, err := digest.Parse(reference); err != nil {
// Delete manifest only accept digest as reference
match = false
return
}
s := re.FindStringSubmatch(req.URL.Path)
if len(s) == 2 {
s[1] = strings.TrimSuffix(s[1], "/")
mount := req.FormValue("mount")
if mount == "" {
return false, "", "", ""
}
from := req.FormValue("from")
if from == "" {
return false, "", "", ""
}
return true, s[1], mount, from
}
return false, "", "", ""
return
}
// CopyResp ...
@ -318,72 +377,6 @@ func GetPolicyChecker() PolicyChecker {
return NewPMSPolicyChecker(config.GlobalProjectMgr)
}
// TryRequireQuota ...
func TryRequireQuota(projectID int64, quotaRes *quota.ResourceList) error {
quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
if err != nil {
log.Errorf("Error occurred when to new quota manager %v", err)
return err
}
if err := quotaMgr.AddResources(*quotaRes); err != nil {
log.Errorf("cannot get quota for the project resource: %d, err: %v", projectID, err)
return ErrRequireQuota
}
return nil
}
// TryFreeQuota used to release resource for failure case
func TryFreeQuota(projectID int64, qres *quota.ResourceList) bool {
quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
if err != nil {
log.Errorf("Error occurred when to new quota manager %v", err)
return false
}
if err := quotaMgr.SubtractResources(*qres); err != nil {
log.Errorf("cannot release quota for the project resource: %d, err: %v", projectID, err)
return false
}
return true
}
// GetBlobSize returns the blob size stored with the UUID in redis
func GetBlobSize(conn redis.Conn, uuid string) (int64, error) {
exists, err := redis.Int(conn.Do("EXISTS", uuid))
if err != nil {
return 0, err
}
if exists == 1 {
size, err := redis.Int64(conn.Do("GET", uuid))
if err != nil {
return 0, err
}
return size, nil
}
return 0, nil
}
// SetBunkSize sets the temp size for blob bunk with its uuid.
func SetBunkSize(conn redis.Conn, uuid string, size int64) (bool, error) {
setRes, err := redis.String(conn.Do("SET", uuid, size))
if err != nil {
return false, err
}
return setRes == "OK", nil
}
// GetProjectID ...
func GetProjectID(name string) (int64, error) {
project, err := dao.GetProjectByName(name)
if err != nil {
return 0, err
}
if project != nil {
return project.ProjectID, nil
}
return 0, fmt.Errorf("project %s is not found", name)
}
// GetRegRedisCon ...
func GetRegRedisCon() (redis.Conn, error) {
// FOR UT
@ -406,7 +399,7 @@ func GetRegRedisCon() (redis.Conn, error) {
// BlobInfoFromContext returns blob info from context
func BlobInfoFromContext(ctx context.Context) (*BlobInfo, bool) {
info, ok := ctx.Value(BBInfokKey).(*BlobInfo)
info, ok := ctx.Value(blobInfoKey).(*BlobInfo)
return info, ok
}
@ -423,14 +416,14 @@ func ImageInfoFromContext(ctx context.Context) (*ImageInfo, bool) {
}
// ManifestInfoFromContext returns manifest info from context
func ManifestInfoFromContext(ctx context.Context) (*MfInfo, bool) {
info, ok := ctx.Value(MFInfokKey).(*MfInfo)
func ManifestInfoFromContext(ctx context.Context) (*ManifestInfo, bool) {
info, ok := ctx.Value(manifestInfoKey).(*ManifestInfo)
return info, ok
}
// NewBlobInfoContext returns context with blob info
func NewBlobInfoContext(ctx context.Context, info *BlobInfo) context.Context {
return context.WithValue(ctx, BBInfokKey, info)
return context.WithValue(ctx, blobInfoKey, info)
}
// NewChartVersionInfoContext returns context with blob info
@ -444,6 +437,92 @@ func NewImageInfoContext(ctx context.Context, info *ImageInfo) context.Context {
}
// NewManifestInfoContext returns context with manifest info
func NewManifestInfoContext(ctx context.Context, info *MfInfo) context.Context {
return context.WithValue(ctx, MFInfokKey, info)
func NewManifestInfoContext(ctx context.Context, info *ManifestInfo) context.Context {
return context.WithValue(ctx, manifestInfoKey, info)
}
// ParseManifestInfo parses the manifest info from the request
func ParseManifestInfo(req *http.Request) (*ManifestInfo, error) {
match, repository, reference := MatchManifestURL(req)
if !match {
return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path)
}
var tag string
if _, err := digest.Parse(reference); err != nil {
tag = reference
}
mediaType := req.Header.Get("Content-Type")
if mediaType != schema1.MediaTypeManifest &&
mediaType != schema1.MediaTypeSignedManifest &&
mediaType != schema2.MediaTypeManifest {
return nil, fmt.Errorf("unsupported content type for manifest: %s", mediaType)
}
if req.Body == nil {
return nil, fmt.Errorf("body missing")
}
body, err := ioutil.ReadAll(req.Body)
if err != nil {
log.Warningf("Error occurred when to copy manifest body %v", err)
return nil, err
}
req.Body = ioutil.NopCloser(bytes.NewBuffer(body))
manifest, desc, err := distribution.UnmarshalManifest(mediaType, body)
if err != nil {
log.Warningf("Error occurred when to Unmarshal Manifest %v", err)
return nil, err
}
projectName, _ := utils.ParseRepository(repository)
project, err := dao.GetProjectByName(projectName)
if err != nil {
return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
}
if project == nil {
return nil, fmt.Errorf("project %s not found", projectName)
}
return &ManifestInfo{
ProjectID: project.ProjectID,
Repository: repository,
Tag: tag,
Digest: desc.Digest.String(),
References: manifest.References(),
Descriptor: desc,
}, nil
}
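// Illustrative usage sketch (editor's addition): a quota interceptor can derive
// the storage cost of a manifest push from the parsed descriptor and its layer
// and config references before forwarding the request to the registry.
func exampleManifestPushSize(req *http.Request) (int64, error) {
	info, err := ParseManifestInfo(req)
	if err != nil {
		return 0, err
	}
	size := info.Descriptor.Size // the manifest blob itself
	for _, ref := range info.References {
		size += ref.Size // config blob and layers
	}
	return size, nil
}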
// ParseManifestInfoFromPath parses the manifest info from the request path
func ParseManifestInfoFromPath(req *http.Request) (*ManifestInfo, error) {
match, repository, reference := MatchManifestURL(req)
if !match {
return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path)
}
projectName, _ := utils.ParseRepository(repository)
project, err := dao.GetProjectByName(projectName)
if err != nil {
return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
}
if project == nil {
return nil, fmt.Errorf("project %s not found", projectName)
}
info := &ManifestInfo{
ProjectID: project.ProjectID,
Repository: repository,
}
dgt, err := digest.Parse(reference)
if err != nil {
info.Tag = reference
} else {
info.Digest = dgt.String()
}
return info, nil
}

View File

@ -15,33 +15,31 @@
package util
import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
testutils "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/quota"
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"reflect"
"testing"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
testutils "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/config"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var endpoint = "10.117.4.142"
var notaryServer *httptest.Server
const testingRedisHost = "REDIS_HOST"
var admiralEndpoint = "http://127.0.0.1:8282"
var token = ""
func TestMain(m *testing.M) {
testutils.InitDatabaseFromEnv()
notaryServer = notarytest.NewNotaryServer(endpoint)
@ -99,56 +97,6 @@ func TestMatchPullManifest(t *testing.T) {
assert.Equal("sha256:ca4626b691f57d16ce1576231e4a2e2135554d32e13a85dcff380d51fdd13f6a", tag7)
}
func TestMatchPutBlob(t *testing.T) {
assert := assert.New(t)
req1, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil)
res1, repo1 := MatchPutBlobURL(req1)
assert.True(res1, "%s %v is not a request to put blob", req1.Method, req1.URL)
assert.Equal("library/ubuntu", repo1)
req2, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/blobs/uploads/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil)
res2, _ := MatchPutBlobURL(req2)
assert.False(res2, "%s %v is a request to put blob", req2.Method, req2.URL)
req3, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/manifest/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil)
res3, _ := MatchPutBlobURL(req3)
assert.False(res3, "%s %v is not a request to put blob", req3.Method, req3.URL)
}
func TestMatchMountBlobURL(t *testing.T) {
assert := assert.New(t)
req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
res1, repo1, mount, from := MatchMountBlobURL(req1)
assert.True(res1, "%s %v is not a request to mount blob", req1.Method, req1.URL)
assert.Equal("library/ubuntu", repo1)
assert.Equal("digtest123", mount)
assert.Equal("testrepo", from)
req2, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
res2, _, _, _ := MatchMountBlobURL(req2)
assert.False(res2, "%s %v is a request to mount blob", req2.Method, req2.URL)
req3, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
res3, _, _, _ := MatchMountBlobURL(req3)
assert.False(res3, "%s %v is not a request to put blob", req3.Method, req3.URL)
}
func TestPatchBlobURL(t *testing.T) {
assert := assert.New(t)
req1, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/1234-1234-abcd", nil)
res1, repo1 := MatchPatchBlobURL(req1)
assert.True(res1, "%s %v is not a request to patch blob", req1.Method, req1.URL)
assert.Equal("library/ubuntu", repo1)
req2, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/1234-1234-abcd", nil)
res2, _ := MatchPatchBlobURL(req2)
assert.False(res2, "%s %v is a request to patch blob", req2.Method, req2.URL)
req3, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
res3, _ := MatchPatchBlobURL(req3)
assert.False(res3, "%s %v is not a request to patch blob", req3.Method, req3.URL)
}
func TestMatchPushManifest(t *testing.T) {
assert := assert.New(t)
req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
@ -260,83 +208,194 @@ func TestMarshalError(t *testing.T) {
assert.Equal("{\"errors\":[{\"code\":\"DENIED\",\"message\":\"The action is denied\",\"detail\":\"The action is denied\"}]}", js2)
}
func TestTryRequireQuota(t *testing.T) {
quotaRes := &quota.ResourceList{
quota.ResourceStorage: 100,
}
err := TryRequireQuota(1, quotaRes)
assert.Nil(t, err)
}
func TestTryFreeQuota(t *testing.T) {
quotaRes := &quota.ResourceList{
quota.ResourceStorage: 1,
}
success := TryFreeQuota(1, quotaRes)
assert.True(t, success)
}
func TestGetBlobSize(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
size, err := GetBlobSize(con, "test-TestGetBlobSize")
assert.Nil(t, err)
assert.Equal(t, size, int64(0))
}
func TestSetBunkSize(t *testing.T) {
con, err := redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", getRedisHost(), 6379),
redis.DialConnectTimeout(30*time.Second),
redis.DialReadTimeout(time.Minute+10*time.Second),
redis.DialWriteTimeout(10*time.Second),
)
assert.Nil(t, err)
defer con.Close()
size, err := GetBlobSize(con, "TestSetBunkSize")
assert.Nil(t, err)
assert.Equal(t, size, int64(0))
_, err = SetBunkSize(con, "TestSetBunkSize", 123)
assert.Nil(t, err)
size1, err := GetBlobSize(con, "TestSetBunkSize")
assert.Nil(t, err)
assert.Equal(t, size1, int64(123))
}
func TestGetProjectID(t *testing.T) {
name := "project_for_TestGetProjectID"
project := models.Project{
OwnerID: 1,
Name: name,
func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest {
manifest := schema2.Manifest{
Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest},
Config: distribution.Descriptor{
MediaType: schema2.MediaTypeImageConfig,
Size: configSize,
Digest: digest.FromString(utils.GenerateRandomString()),
},
}
id, err := dao.AddProject(project)
if err != nil {
t.Fatalf("failed to add project: %v", err)
for _, size := range layerSizes {
manifest.Layers = append(manifest.Layers, distribution.Descriptor{
MediaType: schema2.MediaTypeLayer,
Size: size,
Digest: digest.FromString(utils.GenerateRandomString()),
})
}
idget, err := GetProjectID(name)
assert.Nil(t, err)
assert.Equal(t, id, idget)
return manifest
}
func getRedisHost() string {
redisHost := os.Getenv(testingRedisHost)
if redisHost == "" {
redisHost = "127.0.0.1" // for local test
func getDescriptor(manifest schema2.Manifest) distribution.Descriptor {
buf, _ := json.Marshal(manifest)
_, desc, _ := distribution.UnmarshalManifest(manifest.Versioned.MediaType, buf)
return desc
}
func TestParseManifestInfo(t *testing.T) {
manifest := makeManifest(1, []int64{2, 3, 4})
tests := []struct {
name string
req func() *http.Request
want *ManifestInfo
wantErr bool
}{
{
"ok",
func() *http.Request {
buf, _ := json.Marshal(manifest)
req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifests/latest", bytes.NewReader(buf))
req.Header.Add("Content-Type", manifest.MediaType)
return req
},
&ManifestInfo{
ProjectID: 1,
Repository: "library/photon",
Tag: "latest",
Digest: getDescriptor(manifest).Digest.String(),
References: manifest.References(),
Descriptor: getDescriptor(manifest),
},
false,
},
{
"bad content type",
func() *http.Request {
buf, _ := json.Marshal(manifest)
req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf))
req.Header.Add("Content-Type", "application/json")
return req
},
nil,
true,
},
{
"bad manifest",
func() *http.Request {
req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader([]byte("")))
req.Header.Add("Content-Type", schema2.MediaTypeManifest)
return req
},
nil,
true,
},
{
"body missing",
func() *http.Request {
req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", nil)
req.Header.Add("Content-Type", schema2.MediaTypeManifest)
return req
},
nil,
true,
},
{
"project not found",
func() *http.Request {
buf, _ := json.Marshal(manifest)
req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf))
req.Header.Add("Content-Type", manifest.MediaType)
return req
},
nil,
true,
},
{
"url not match",
func() *http.Request {
buf, _ := json.Marshal(manifest)
req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifest/latest", bytes.NewReader(buf))
req.Header.Add("Content-Type", manifest.MediaType)
return req
},
nil,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ParseManifestInfo(tt.req())
if (err != nil) != tt.wantErr {
t.Errorf("ParseManifestInfo() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ParseManifestInfo() = %v, want %v", got, tt.want)
}
})
}
}
func TestParseManifestInfoFromPath(t *testing.T) {
mustRequest := func(method, url string) *http.Request {
req, _ := http.NewRequest(method, url, nil)
return req
}
return redisHost
type args struct {
req *http.Request
}
tests := []struct {
name string
args args
want *ManifestInfo
wantErr bool
}{
{
"ok for digest",
args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059")},
&ManifestInfo{
ProjectID: 1,
Repository: "library/photon",
Digest: "sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059",
},
false,
},
{
"ok for tag",
args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/latest")},
&ManifestInfo{
ProjectID: 1,
Repository: "library/photon",
Tag: "latest",
},
false,
},
{
"project not found",
args{mustRequest(http.MethodDelete, "/v2/notfound/photon/manifests/latest")},
nil,
true,
},
{
"url not match",
args{mustRequest(http.MethodDelete, "/v2/library/photon/manifest/latest")},
nil,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ParseManifestInfoFromPath(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("ParseManifestInfoFromPath() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ParseManifestInfoFromPath() = %v, want %v", got, tt.want)
}
})
}
}