Merge pull request #8750 from wy65701436/gc-fix-quota

fix quota size usage in gc job

After the GC run completes, recalculate each project's storage size from the blobs referenced by its artifacts (counting a blob shared by several artifacts only once) and write the result back to the project's quota usage record.
Wang Yan 2019-08-21 18:32:26 +08:00 committed by GitHub
commit af845bdab5
5 changed files with 209 additions and 6 deletions

View File

@@ -108,7 +108,21 @@ func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blo
func CountSizeOfProject(pid int64) (int64, error) {
var blobs []models.Blob
_, err := GetOrmer().Raw(`SELECT bb.id, bb.digest, bb.content_type, bb.size, bb.creation_time FROM project_blob pb LEFT JOIN blob bb ON pb.blob_id = bb.id WHERE pb.project_id = ? `, pid).QueryRows(&blobs)
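// Derive the project's size from the blobs reachable through its artifacts
// (artifact -> artifact_blob -> blob); DISTINCT keeps a blob shared by
// several artifacts from being counted more than once.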
sql := `
SELECT
DISTINCT bb.digest,
bb.id,
bb.content_type,
bb.size,
bb.creation_time
FROM artifact af
JOIN artifact_blob afnb
ON af.digest = afnb.digest_af
JOIN BLOB bb
ON afnb.digest_blob = bb.digest
WHERE af.project_id = ?
`
_, err := GetOrmer().Raw(sql, pid).QueryRows(&blobs)
if err != nil {
return 0, err
}
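The hunk stops at the error branch; presumably the remainder of CountSizeOfProject simply sums the sizes of the distinct blobs returned by the query, roughly like this sketch (not part of the diff):

var size int64
for _, blob := range blobs {
	size += blob.Size
}
return size, nil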

View File

@@ -40,29 +40,159 @@ func TestHasBlobInProject(t *testing.T) {
}
func TestCountSizeOfProject(t *testing.T) {
id1, err := AddBlob(&models.Blob{
_, err := AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob1",
Size: 101,
})
require.Nil(t, err)
id2, err := AddBlob(&models.Blob{
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob2",
Size: 202,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob3",
Size: 303,
})
require.Nil(t, err)
pid1, err := AddProject(models.Project{
Name: "CountSizeOfProject_project1",
OwnerID: 1,
})
require.Nil(t, err)
_, err = AddBlobToProject(id1, pid1)
af := &models.Artifact{
PID: pid1,
Repo: "hello-world",
Tag: "v1",
Digest: "CountSizeOfProject_af1",
Kind: "image",
}
// add the artifact
_, err = AddArtifact(af)
require.Nil(t, err)
_, err = AddBlobToProject(id2, pid1)
afnb1 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af1",
DigestBlob: "CountSizeOfProject_blob1",
}
afnb2 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af1",
DigestBlob: "CountSizeOfProject_blob2",
}
afnb3 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af1",
DigestBlob: "CountSizeOfProject_blob3",
}
var afnbs []*models.ArtifactAndBlob
afnbs = append(afnbs, afnb1)
afnbs = append(afnbs, afnb2)
afnbs = append(afnbs, afnb3)
// add the artifact-blob links
err = AddArtifactNBlobs(afnbs)
require.Nil(t, err)
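// af1 references blob1, blob2 and blob3, so the expected project size is 101+202+303 = 606.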
pSize, err := CountSizeOfProject(pid1)
assert.Equal(t, pSize, int64(303))
assert.Equal(t, int64(606), pSize)
}
func TestCountSizeOfProjectDupdigest(t *testing.T) {
_, err := AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob11",
Size: 101,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob22",
Size: 202,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob33",
Size: 303,
})
require.Nil(t, err)
_, err = AddBlob(&models.Blob{
Digest: "CountSizeOfProject_blob44",
Size: 404,
})
require.Nil(t, err)
pid1, err := AddProject(models.Project{
Name: "CountSizeOfProject_project11",
OwnerID: 1,
})
require.Nil(t, err)
// add af1 to the project
af1 := &models.Artifact{
PID: pid1,
Repo: "hello-world",
Tag: "v1",
Digest: "CountSizeOfProject_af11",
Kind: "image",
}
_, err = AddArtifact(af1)
require.Nil(t, err)
afnb11 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af11",
DigestBlob: "CountSizeOfProject_blob11",
}
afnb12 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af11",
DigestBlob: "CountSizeOfProject_blob22",
}
afnb13 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af11",
DigestBlob: "CountSizeOfProject_blob33",
}
var afnbs1 []*models.ArtifactAndBlob
afnbs1 = append(afnbs1, afnb11)
afnbs1 = append(afnbs1, afnb12)
afnbs1 = append(afnbs1, afnb13)
err = AddArtifactNBlobs(afnbs1)
require.Nil(t, err)
// add af2 to the project
af2 := &models.Artifact{
PID: pid1,
Repo: "hello-world",
Tag: "v2",
Digest: "CountSizeOfProject_af22",
Kind: "image",
}
_, err = AddArtifact(af2)
require.Nil(t, err)
afnb21 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob11",
}
afnb22 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob22",
}
afnb23 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob33",
}
afnb24 := &models.ArtifactAndBlob{
DigestAF: "CountSizeOfProject_af22",
DigestBlob: "CountSizeOfProject_blob44",
}
var afnbs2 []*models.ArtifactAndBlob
afnbs2 = append(afnbs2, afnb21)
afnbs2 = append(afnbs2, afnb22)
afnbs2 = append(afnbs2, afnb23)
afnbs2 = append(afnbs2, afnb24)
err = AddArtifactNBlobs(afnbs2)
require.Nil(t, err)
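// af11 and af22 share blob11, blob22 and blob33; the DISTINCT in CountSizeOfProject
// counts each shared blob once, so the expected size is 101+202+303+404 = 1010 rather than 1616.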
pSize, err := CountSizeOfProject(pid1)
assert.Equal(t, int64(1010), pSize)
}

View File

@@ -193,6 +193,16 @@ func (m *Manager) UpdateQuota(hardLimits types.ResourceList) error {
return err
}
// SetResourceUsage sets the usage per resource name
func (m *Manager) SetResourceUsage(resource types.ResourceName, value int64) error {
o := dao.GetOrmer()
sql := fmt.Sprintf("UPDATE quota_usage SET used = jsonb_set(used, '{%s}', to_jsonb(%d::int), true) WHERE reference = ? AND reference_id = ?", resource, value)
_, err := o.Raw(sql, m.reference, m.referenceID).Exec()
return err
}
// EnsureQuota ensures the reference has quota and usage;
// if they do not exist, it creates them,
// if they exist, it updates them.
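For reference, a minimal sketch of the UPDATE that SetResourceUsage above ends up issuing, assuming the storage resource name serializes to "storage" and the GC job recomputed the project size as 1010 bytes (illustrative values only):

package main

import "fmt"

func main() {
	// Mirror the fmt.Sprintf in SetResourceUsage with illustrative inputs.
	resource, value := "storage", int64(1010)
	sql := fmt.Sprintf("UPDATE quota_usage SET used = jsonb_set(used, '{%s}', to_jsonb(%d::int), true) WHERE reference = ? AND reference_id = ?", resource, value)
	fmt.Println(sql)
	// Prints:
	// UPDATE quota_usage SET used = jsonb_set(used, '{storage}', to_jsonb(1010::int), true) WHERE reference = ? AND reference_id = ?
}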

View File

@@ -132,6 +132,24 @@ func (suite *ManagerSuite) TestUpdateQuota() {
}
}
func (suite *ManagerSuite) TestSetResourceUsage() {
mgr := suite.quotaManager()
id, _ := mgr.NewQuota(hardLimits)
if err := mgr.SetResourceUsage(types.ResourceCount, 123); suite.Nil(err) {
quota, _ := dao.GetQuota(id)
suite.Equal(hardLimits, mustResourceList(quota.Hard))
usage, _ := dao.GetQuotaUsage(id)
suite.Equal(types.ResourceList{types.ResourceCount: 123, types.ResourceStorage: 0}, mustResourceList(usage.Used))
}
if err := mgr.SetResourceUsage(types.ResourceStorage, 234); suite.Nil(err) {
usage, _ := dao.GetQuotaUsage(id)
suite.Equal(types.ResourceList{types.ResourceCount: 123, types.ResourceStorage: 234}, mustResourceList(usage.Used))
}
}
func (suite *ManagerSuite) TestEnsureQuota() {
// non-existent
nonExistRefID := "3"

View File

@@ -22,10 +22,14 @@ import (
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/common/dao"
common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/registryctl"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/goharbor/harbor/src/registryctl/client"
"strconv"
)
const (
@@ -88,6 +92,9 @@ func (gc *GarbageCollector) Run(ctx job.Context, params job.Parameters) error {
if err := gc.cleanCache(); err != nil {
return err
}
if err := gc.ensureQuota(); err != nil {
gc.logger.Warningf("failed to align quota data in gc job, with error: %v", err)
}
gc.logger.Infof("GC results: status: %t, message: %s, start: %s, end: %s.", gcr.Status, gcr.Msg, gcr.StartTime, gcr.EndTime)
gc.logger.Infof("success to run gc in job.")
return nil
@@ -193,3 +200,27 @@ func delKeys(con redis.Conn, pattern string) error {
}
return nil
}
func (gc *GarbageCollector) ensureQuota() error {
projects, err := dao.GetProjects(nil)
if err != nil {
return err
}
for _, project := range projects {
pSize, err := dao.CountSizeOfProject(project.ProjectID)
if err != nil {
gc.logger.Warningf("error happen on counting size of project:%d by artifact, error:%v, just skip it.", project.ProjectID, err)
continue
}
quotaMgr, err := common_quota.NewManager("project", strconv.FormatInt(project.ProjectID, 10))
if err != nil {
gc.logger.Errorf("Error occurred when to new quota manager %v, just skip it.", err)
continue
}
if err := quotaMgr.SetResourceUsage(types.ResourceStorage, pSize); err != nil {
gc.logger.Errorf("cannot ensure quota for the project: %d, err: %v, just skip it.", project.ProjectID, err)
continue
}
}
return nil
}