Merge branch 'master' into add-sign-api-back-to-travis-despite-codacy-1

This commit is contained in:
danfengliu 2019-08-16 16:28:49 +08:00 committed by GitHub
commit 2aa6f1aed7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
161 changed files with 4664 additions and 2047 deletions

View File

@ -1263,11 +1263,16 @@ paths:
type: string
required: true
description: Relevant repository name.
- name: label_ids
- name: label_id
in: query
type: string
required: false
description: A list of comma separated label IDs.
description: A label ID.
- name: detail
in: query
type: boolean
required: false
description: Bool value indicating whether to return detailed information of the tag, such as vulnerability scan info. If set to false, only the tag name is returned.
tags:
- Products
responses:
@ -3684,7 +3689,7 @@ paths:
description: Unexpected internal errors.
'/projects/{project_id}/webhook/policies':
get:
sumary: List project webhook policies.
summary: List project webhook policies.
description: |
This endpoint returns webhook policies of a project.
parameters:
@ -3712,7 +3717,7 @@ paths:
'500':
description: Unexpected internal errors.
post:
sumary: Create project webhook policy.
summary: Create project webhook policy.
description: |
This endpoint create a webhook policy if the project does not have one.
parameters:
@ -3757,7 +3762,7 @@ paths:
in: path
description: The id of webhook policy.
required: true
type: int64
type: integer
format: int64
tags:
- Products
@ -3791,7 +3796,7 @@ paths:
in: path
description: The id of webhook policy.
required: true
type: int64
type: integer
format: int64
- name: policy
in: body
@ -3829,7 +3834,7 @@ paths:
in: path
description: The id of webhook policy.
required: true
type: int64
type: integer
format: int64
tags:
- Products
@ -3908,7 +3913,7 @@ paths:
description: Internal server errors.
'/projects/{project_id}/webhook/jobs':
get:
sumary: List project webhook jobs
summary: List project webhook jobs
description: |
This endpoint returns webhook jobs of a project.
parameters:
@ -4083,16 +4088,20 @@ definitions:
description: 'The public status of the project. The valid values are "true", "false".'
enable_content_trust:
type: string
description: 'Whether content trust is enabled or not. If it is enabled, user cann''t pull unsigned images from this project. The valid values are "true", "false".'
description: 'Whether content trust is enabled or not. If it is enabled, user can''t pull unsigned images from this project. The valid values are "true", "false".'
prevent_vul:
type: string
description: 'Whether to prevent vulnerable images from running. The valid values are "true", "false".'
severity:
type: string
description: 'If the vulnerability is high than severity defined here, the images cann''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
description: 'If the vulnerability severity is higher than the severity defined here, the images can''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
auto_scan:
type: string
description: 'Whether to scan images automatically when pushing. The valid values are "true", "false".'
reuse_sys_cve_whitelist:
type: string
description: 'Whether this project reuses the system-level CVE whitelist as its own. The valid values are "true", "false".
If it is set to "true", the actual whitelist associated with this project, if any, will be ignored.'
ProjectSummary:
type: object
properties:
@ -4841,6 +4850,9 @@ definitions:
project_creation_restriction:
type: string
description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
quota_per_project_enable:
type: boolean
description: This attribute indicates whether quota per project is enabled in Harbor
read_only:
type: boolean
description: '''docker push'' is prohibited by Harbor if you set it to true. '
@ -4938,6 +4950,9 @@ definitions:
project_creation_restriction:
$ref: '#/definitions/StringConfigItem'
description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
quota_per_project_enable:
$ref: '#/definitions/BoolConfigItem'
description: This attribute indicates whether quota per project is enabled in Harbor
read_only:
$ref: '#/definitions/BoolConfigItem'
description: '''docker push'' is prohibited by Harbor if you set it to true. '
@ -5349,7 +5364,9 @@ definitions:
properties:
type:
type: string
description: The schedule type. The valid values are hourly, daily weekly, custom and None. 'None' means to cancel the schedule.
description: |
The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manually' and 'None'.
'Manually' means to trigger it right away and 'None' means to cancel the schedule.
cron:
type: string
description: A cron expression, a time-based job scheduler.
@ -5724,7 +5741,7 @@ definitions:
description: The webhook job ID.
policy_id:
type: integer
fromat: int64
format: int64
description: The webhook policy ID.
event_type:
type: string

View File

@ -30,6 +30,11 @@ harbor_admin_password: Harbor12345
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it is <= 0, no idle connections are retained.
max_idle_conns: 50
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 100 for postgres.
max_open_conns: 100
# The default data volume
data_volume: /data
@ -50,18 +55,12 @@ data_volume: /data
# disabled: false
# Clair configuration
clair:
clair:
# The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
updaters_interval: 12
# Config http proxy for Clair, e.g. http://my.proxy.com:3128
# Clair doesn't need to connect to harbor internal components via http proxy.
http_proxy:
https_proxy:
no_proxy: 127.0.0.1,localhost,core,registry
jobservice:
# Maximum number of job workers in job service
# Maximum number of job workers in job service
max_job_workers: 10
notification:
@ -80,8 +79,8 @@ log:
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
@ -143,3 +142,20 @@ _version: 1.8.0
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via http proxy.
# Remove a component from the `components` array if you want to disable the proxy
# for it. If you want to use a proxy for replication, you MUST enable the proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for some special registries.
proxy:
http_proxy:
https_proxy:
no_proxy: 127.0.0.1,localhost,.local,.internal,log,db,redis,nginx,core,portal,postgresql,jobservice,registry,registryctl,clair
components:
- core
- jobservice
- clair

View File

@ -23,6 +23,15 @@ CREATE TABLE blob
UNIQUE (digest)
);
/* add the table for project and blob */
CREATE TABLE project_blob (
id SERIAL PRIMARY KEY NOT NULL,
project_id int NOT NULL,
blob_id int NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
CONSTRAINT unique_project_blob UNIQUE (project_id, blob_id)
);
CREATE TABLE artifact
(
id SERIAL PRIMARY KEY NOT NULL,

View File

@ -1,7 +1,8 @@
FROM node:10.15.0 as nodeportal
COPY src/portal /portal_src
COPY ./docs/swagger.yaml /portal_src
COPY ./docs/swagger.yaml /portal_src
COPY ./LICENSE /portal_src
WORKDIR /build_dir
@ -21,6 +22,7 @@ FROM photon:2.0
COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html
COPY --from=nodeportal /build_dir/LICENSE /usr/share/nginx/html
COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf

View File

@ -12,11 +12,12 @@ REDIS_UID = 999
REDIS_GID = 999
## Global variable
host_root_dir = '/hostfs'
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = '/config'
data_dir = '/data'
secret_dir = '/secret'
secret_key_dir='/secret/keys'

View File

@ -1,3 +1,3 @@
http_proxy={{clair_http_proxy}}
https_proxy={{clair_https_proxy}}
no_proxy={{clair_no_proxy}}
HTTP_PROXY={{clair_http_proxy}}
HTTPS_PROXY={{clair_https_proxy}}
NO_PROXY={{clair_no_proxy}}

View File

@ -15,6 +15,8 @@ POSTGRESQL_USERNAME={{harbor_db_username}}
POSTGRESQL_PASSWORD={{harbor_db_password}}
POSTGRESQL_DATABASE={{harbor_db_name}}
POSTGRESQL_SSLMODE={{harbor_db_sslmode}}
POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}}
POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}}
REGISTRY_URL={{registry_url}}
TOKEN_SERVICE_URL={{token_service_url}}
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
@ -41,3 +43,7 @@ RELOAD_KEY={{reload_key}}
CHART_REPOSITORY_URL={{chart_repository_url}}
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
WITH_CHARTMUSEUM={{with_chartmuseum}}
HTTP_PROXY={{core_http_proxy}}
HTTPS_PROXY={{core_https_proxy}}
NO_PROXY={{core_no_proxy}}

View File

@ -276,12 +276,7 @@ services:
volumes:
- ./common/config/nginx:/etc/nginx:z
{% if protocol == 'https' %}
- type: bind
source: {{cert_key_path}}
target: /etc/cert/server.key
- type: bind
source: {{cert_path}}
target: /etc/cert/server.crt
- {{data_volume}}/secret/cert:/etc/cert:z
{% endif %}
networks:
- harbor

View File

@ -2,3 +2,7 @@ CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
CORE_URL={{core_url}}
JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}}
HTTP_PROXY={{jobservice_http_proxy}}
HTTPS_PROXY={{jobservice_https_proxy}}
NO_PROXY={{jobservice_no_proxy}}

View File

@ -112,6 +112,11 @@ def parse_yaml_config(config_file_path):
config_dict['harbor_db_username'] = 'postgres'
config_dict['harbor_db_password'] = db_configs.get("password") or ''
config_dict['harbor_db_sslmode'] = 'disable'
default_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns
default_max_open_conns = 0 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns
config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_max_idle_conns
config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_max_open_conns
# clair db
config_dict['clair_db_host'] = 'postgresql'
config_dict['clair_db_port'] = 5432
@ -171,13 +176,18 @@ def parse_yaml_config(config_file_path):
if storage_config.get('redirect'):
config_dict['storage_redirect_disabled'] = storage_config['redirect']['disabled']
# Global proxy configs
proxy_config = configs.get('proxy') or {}
proxy_components = proxy_config.get('components') or []
for proxy_component in proxy_components:
config_dict[proxy_component + '_http_proxy'] = proxy_config.get('http_proxy') or ''
config_dict[proxy_component + '_https_proxy'] = proxy_config.get('https_proxy') or ''
config_dict[proxy_component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# Clair configs, optional
clair_configs = configs.get("clair") or {}
config_dict['clair_db'] = 'postgres'
config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12
config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or ''
config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or ''
config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# Chart configs
chart_configs = configs.get("chart") or {}
@ -286,4 +296,4 @@ def parse_yaml_config(config_file_path):
# UAA configs
config_dict['uaa'] = configs.get('uaa') or {}
return config_dict
return config_dict

View File

@ -2,11 +2,13 @@ import os, shutil
from fnmatch import fnmatch
from pathlib import Path
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from g import config_dir, templates_dir, host_root_dir, DEFAULT_GID, DEFAULT_UID, data_dir
from utils.misc import prepare_dir, mark_file
from utils.jinja import render_jinja
from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH
host_ngx_real_cert_dir = Path(os.path.join(data_dir, 'secret', 'cert'))
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d")
nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja")
@ -20,8 +22,38 @@ def prepare_nginx(config_dict):
prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
render_nginx_template(config_dict)
def prepare_nginx_certs(cert_key_path, cert_path):
"""
Prepare the certs file with proper ownership
1. Remove nginx cert files in secret dir
2. Copy cert files on host filesystem to secret dir
3. Change the permission to 644 and ownership to 10000:10000
"""
host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/')))
host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/')))
if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir():
shutil.rmtree(host_ngx_real_cert_dir)
os.makedirs(host_ngx_real_cert_dir, mode=0o755)
real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key')
real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt')
shutil.copy2(host_ngx_cert_key_path, real_key_path)
shutil.copy2(host_ngx_cert_path, real_crt_path)
os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
def render_nginx_template(config_dict):
if config_dict['protocol'] == "https":
"""
1. render nginx config file through protocol
2. copy additional configs to cert.d dir
"""
if config_dict['protocol'] == 'https':
prepare_nginx_certs(config_dict['cert_key_path'], config_dict['cert_path'])
render_jinja(
nginx_https_conf_template,
nginx_conf,
@ -30,12 +62,7 @@ def render_nginx_template(config_dict):
ssl_cert=SSL_CERT_PATH,
ssl_cert_key=SSL_CERT_KEY_PATH)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
cert_dir = Path(os.path.join(config_dir, 'cert'))
ssl_key_path = Path(os.path.join(cert_dir, 'server.key'))
ssl_crt_path = Path(os.path.join(cert_dir, 'server.crt'))
cert_dir.mkdir(parents=True, exist_ok=True)
ssl_key_path.touch()
ssl_crt_path.touch()
else:
render_jinja(
nginx_http_conf_template,
@ -45,22 +72,23 @@ def render_nginx_template(config_dict):
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)
def add_additional_location_config(src, dst):
"""
These conf files are used by users who want to add additional customized locations to the Harbor proxy
:param src: source of the file
:param dst: destination file path
"""
if not os.path.isfile(src):
return
print("Copying nginx configuration file {src} to {dst}".format(
src=src, dst=dst))
shutil.copy2(src, dst)
mark_file(dst, mode=0o644)
def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern):
if not os.path.exists(src_config_dir):
return
def add_additional_location_config(src, dst):
"""
These conf files are used by users who want to add additional customized locations to the Harbor proxy
:param src: source of the file
:param dst: destination file path
"""
if not os.path.isfile(src):
return
print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
shutil.copy2(src, dst)
mark_file(dst, mode=0o644)
map(lambda filename: add_additional_location_config(
os.path.join(src_config_dir, filename),
os.path.join(dst_config_dir, filename)),

View File

@ -50,6 +50,7 @@ docker run --rm -v $input_dir:/input:z \
-v $harbor_prepare_path:/compose_location:z \
-v $config_dir:/config:z \
-v $secret_dir:/secret:z \
-v /:/hostfs:z \
goharbor/prepare:dev $@
echo "Clean up the input dir"

View File

@ -210,12 +210,14 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database {
return &models.Database{
Type: c.Get(common.DatabaseType).GetString(),
PostGreSQL: &models.PostGreSQL{
Host: c.Get(common.PostGreSQLHOST).GetString(),
Port: c.Get(common.PostGreSQLPort).GetInt(),
Username: c.Get(common.PostGreSQLUsername).GetString(),
Password: c.Get(common.PostGreSQLPassword).GetString(),
Database: c.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
Host: c.Get(common.PostGreSQLHOST).GetString(),
Port: c.Get(common.PostGreSQLPort).GetInt(),
Username: c.Get(common.PostGreSQLUsername).GetString(),
Password: c.Get(common.PostGreSQLPassword).GetString(),
Database: c.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
MaxIdleConns: c.Get(common.PostGreSQLMaxIdleConns).GetInt(),
MaxOpenConns: c.Get(common.PostGreSQLMaxOpenConns).GetInt(),
},
}
}

View File

@ -116,6 +116,8 @@ var (
{Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false},
{Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false},
{Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false},
{Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
@ -151,6 +153,7 @@ var (
{Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
{Name: common.NotificationEnable, Scope: UserScope, Group: BasicGroup, EnvKey: "NOTIFICATION_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
{Name: common.QuotaPerProjectEnable, Scope: UserScope, Group: QuotaGroup, EnvKey: "QUOTA_PER_PROJECT_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
{Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
{Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
}

View File

@ -53,6 +53,8 @@ const (
PostGreSQLPassword = "postgresql_password"
PostGreSQLDatabase = "postgresql_database"
PostGreSQLSSLMode = "postgresql_sslmode"
PostGreSQLMaxIdleConns = "postgresql_max_idle_conns"
PostGreSQLMaxOpenConns = "postgresql_max_open_conns"
SelfRegistration = "self_registration"
CoreURL = "core_url"
CoreLocalURL = "core_local_url"
@ -146,7 +148,9 @@ const (
// Global notification enable configuration
NotificationEnable = "notification_enable"
// Quota setting items for project
CountPerProject = "count_per_project"
StoragePerProject = "storage_per_project"
QuotaPerProjectEnable = "quota_per_project_enable"
CountPerProject = "count_per_project"
StoragePerProject = "storage_per_project"
)

View File

@ -26,6 +26,8 @@ import (
func AddArtifact(af *models.Artifact) (int64, error) {
now := time.Now()
af.CreationTime = now
af.PushTime = now
id, err := GetOrmer().Insert(af)
if err != nil {
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
@ -36,6 +38,12 @@ func AddArtifact(af *models.Artifact) (int64, error) {
return id, nil
}
// UpdateArtifact ...
func UpdateArtifact(af *models.Artifact) error {
_, err := GetOrmer().Update(af)
return err
}
// UpdateArtifactDigest ...
func UpdateArtifactDigest(af *models.Artifact) error {
_, err := GetOrmer().Update(af, "digest")

View File

@ -121,12 +121,16 @@ func getDatabase(database *models.Database) (db Database, err error) {
switch database.Type {
case "", "postgresql":
db = NewPGSQL(database.PostGreSQL.Host,
db = NewPGSQL(
database.PostGreSQL.Host,
strconv.Itoa(database.PostGreSQL.Port),
database.PostGreSQL.Username,
database.PostGreSQL.Password,
database.PostGreSQL.Database,
database.PostGreSQL.SSLMode)
database.PostGreSQL.SSLMode,
database.PostGreSQL.MaxIdleConns,
database.PostGreSQL.MaxOpenConns,
)
default:
err = fmt.Errorf("invalid database: %s", database.Type)
}
@ -139,6 +143,8 @@ var once sync.Once
// GetOrmer :set ormer singleton
func GetOrmer() orm.Ormer {
once.Do(func() {
// override the default value(1000) to return all records when setting no limit
orm.DefaultRowsLimit = -1
globalOrm = orm.NewOrm()
})
return globalOrm

View File

@ -2,11 +2,11 @@ package dao
import (
"fmt"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"strings"
"time"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
)
// AddBlob ...
@ -23,6 +23,20 @@ func AddBlob(blob *models.Blob) (int64, error) {
return id, nil
}
// GetOrCreateBlob returns blob by digest, create it if not exists
func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) {
blob.CreationTime = time.Now()
created, id, err := GetOrmer().ReadOrCreate(blob, "digest")
if err != nil {
return false, nil, err
}
blob.ID = id
return created, blob, nil
}
// GetBlob ...
func GetBlob(digest string) (*models.Blob, error) {
o := GetOrmer()
@ -50,15 +64,73 @@ func DeleteBlob(digest string) error {
return err
}
// HasBlobInProject ...
func HasBlobInProject(projectID int64, digest string) (bool, error) {
var res []orm.Params
num, err := GetOrmer().Raw(`SELECT * FROM artifact af LEFT JOIN artifact_blob afnb ON af.digest = afnb.digest_af WHERE af.project_id = ? and afnb.digest_blob = ? `, projectID, digest).Values(&res)
if err != nil {
return false, err
// GetBlobsByArtifact returns blobs of artifact
func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) {
sql := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)`
var blobs []*models.Blob
if _, err := GetOrmer().Raw(sql, artifactDigest).QueryRows(&blobs); err != nil {
return nil, err
}
if num == 0 {
return false, nil
}
return true, nil
return blobs, nil
}
// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project
func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) {
blobs, err := GetBlobsByArtifact(digest)
if err != nil {
return nil, err
}
sql := fmt.Sprintf(`
SELECT
DISTINCT b.digest_blob AS digest
FROM
(
SELECT
digest
FROM
artifact
WHERE
(
project_id = ?
AND repo != ?
)
OR (
project_id = ?
AND digest != ?
)
) AS a
LEFT JOIN artifact_blob b ON a.digest = b.digest_af
AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)-1))
params := []interface{}{projectID, repository, projectID, digest}
for _, blob := range blobs {
if blob.Digest != digest {
params = append(params, blob.Digest)
}
}
var rows []struct {
Digest string
}
if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil {
return nil, err
}
shared := map[string]bool{}
for _, row := range rows {
shared[row.Digest] = true
}
var exclusive []*models.Blob
for _, blob := range blobs {
if blob.Digest != digest && !shared[blob.Digest] {
exclusive = append(exclusive, blob)
}
}
return exclusive, nil
}

View File

@ -15,10 +15,15 @@
package dao
import (
"strings"
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
"github.com/stretchr/testify/suite"
)
func TestAddBlob(t *testing.T) {
@ -64,42 +69,154 @@ func TestDeleteBlob(t *testing.T) {
require.Nil(t, err)
}
func TestHasBlobInProject(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "TestHasBlobInProject",
Tag: "latest",
Digest: "tttt",
Kind: "image",
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
afnb1 := &models.ArtifactAndBlob{
DigestAF: "tttt",
DigestBlob: "zzza",
}
afnb2 := &models.ArtifactAndBlob{
DigestAF: "tttt",
DigestBlob: "zzzb",
}
afnb3 := &models.ArtifactAndBlob{
DigestAF: "tttt",
DigestBlob: "zzzc",
func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) {
digest := digest.FromString(strings.Join(layerDigests, ":")).String()
artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag}
if _, err := AddArtifact(artifact); err != nil {
return "", err
}
var afnbs []*models.ArtifactAndBlob
afnbs = append(afnbs, afnb1)
afnbs = append(afnbs, afnb2)
afnbs = append(afnbs, afnb3)
// add
err = AddArtifactNBlobs(afnbs)
require.Nil(t, err)
blobDigests := append([]string{digest}, layerDigests...)
for _, blobDigest := range blobDigests {
blob := &models.Blob{Digest: blobDigest, Size: 1}
if _, _, err := GetOrCreateBlob(blob); err != nil {
return "", err
}
has, err := HasBlobInProject(1, "zzzb")
require.Nil(t, err)
assert.True(t, has)
afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest})
}
total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest})
if err != nil {
return "", err
}
if total == 1 {
if err := AddArtifactNBlobs(afnbs); err != nil {
return "", err
}
}
return digest, nil
}
func withProject(f func(int64, string)) {
projectName := utils.GenerateRandomString()
projectID, err := AddProject(models.Project{
Name: projectName,
OwnerID: 1,
})
if err != nil {
panic(err)
}
defer func() {
DeleteProject(projectID)
}()
f(projectID, projectName)
}
type GetExclusiveBlobsSuite struct {
suite.Suite
}
func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string {
digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...)
suite.Nil(err)
return digest
}
func (suite *GetExclusiveBlobsSuite) TestInSameRepository() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
digest3 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 2)
}
manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(digest3, blobs[0].Digest)
}
})
}
func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
digest3 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(digest3, blobs[0].Digest)
}
})
}
func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
withProject(func(id int64, name string) {
manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 2)
}
if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 2)
}
})
})
}
func TestRunGetExclusiveBlobsSuite(t *testing.T) {
suite.Run(t, new(GetExclusiveBlobsSuite))
}

View File

@ -31,12 +31,14 @@ import (
const defaultMigrationPath = "migrations/postgresql/"
type pgsql struct {
host string
port string
usr string
pwd string
database string
sslmode string
host string
port string
usr string
pwd string
database string
sslmode string
maxIdleConns int
maxOpenConns int
}
// Name returns the name of PostgreSQL
@ -51,17 +53,19 @@ func (p *pgsql) String() string {
}
// NewPGSQL returns an instance of postgres
func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string) Database {
func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database {
if len(sslmode) == 0 {
sslmode = "disable"
}
return &pgsql{
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
maxIdleConns: maxIdleConns,
maxOpenConns: maxOpenConns,
}
}
@ -82,7 +86,7 @@ func (p *pgsql) Register(alias ...string) error {
info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
p.host, p.port, p.usr, p.pwd, p.database, p.sslmode)
return orm.RegisterDataBase(an, "postgres", info)
return orm.RegisterDataBase(an, "postgres", info, p.maxIdleConns, p.maxOpenConns)
}
// UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.

View File

@ -44,7 +44,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error {
params = append(params, projectID)
if len(name) > 0 {
sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@ -74,7 +74,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
params = append(params, projectID)
if len(name) > 0 {
sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@ -82,7 +82,9 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
return proMetas, err
}
func paramPlaceholder(n int) string {
// ParamPlaceholderForIn returns a string that contains placeholders for sql keyword "in"
// e.g. n=3, returns "?,?,?"
func ParamPlaceholderForIn(n int) string {
placeholders := []string{}
for i := 0; i < n; i++ {
placeholders = append(placeholders, "?")

View File

@ -167,9 +167,10 @@ func GetGroupProjects(groupIDs []int, query *models.ProjectQueryParam) ([]*model
from project p
left join project_member pm on p.project_id = pm.project_id
left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
where ug.id in ( %s ) order by name`,
where ug.id in ( %s )`,
sql, groupIDCondition)
}
sql = sql + ` order by name`
sqlStr, queryParams := CreatePagination(query, sql, params)
log.Debugf("query sql:%v", sql)
var projects []*models.Project
@ -259,7 +260,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
}
if len(query.ProjectIDs) > 0 {
sql += fmt.Sprintf(` and p.project_id in ( %s )`,
paramPlaceholder(len(query.ProjectIDs)))
ParamPlaceholderForIn(len(query.ProjectIDs)))
params = append(params, query.ProjectIDs)
}
return sql, params

View File

@ -0,0 +1,122 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"fmt"
"time"
"github.com/goharbor/harbor/src/common/models"
)
// AddBlobToProject associates a blob with a project, creating the
// project_blob row only if it does not already exist. It returns the id of
// the (new or pre-existing) association row.
func AddBlobToProject(blobID, projectID int64) (int64, error) {
	record := &models.ProjectBlob{
		BlobID:       blobID,
		ProjectID:    projectID,
		CreationTime: time.Now(),
	}
	// ReadOrCreate is keyed on (blob_id, project_id) so the call is idempotent
	_, recordID, err := GetOrmer().ReadOrCreate(record, "blob_id", "project_id")
	return recordID, err
}
// AddBlobsToProject associates every given blob with the project using a
// single bulk insert; it returns the result of InsertMulti. A call with no
// blobs is a no-op.
func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) {
	if len(blobs) == 0 {
		return 0, nil
	}

	createdAt := time.Now()
	records := make([]*models.ProjectBlob, 0, len(blobs))
	for _, b := range blobs {
		records = append(records, &models.ProjectBlob{
			BlobID:       b.ID,
			ProjectID:    projectID,
			CreationTime: createdAt,
		})
	}

	return GetOrmer().InsertMulti(len(records), records)
}
// RemoveBlobsFromProject removes the association between the given project
// and the given blobs. Associations of other projects that reference the
// same blobs are left untouched. A call with no blobs is a no-op.
func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error {
	var blobIDs []interface{}
	for _, blob := range blobs {
		blobIDs = append(blobIDs, blob.ID)
	}

	if len(blobIDs) == 0 {
		return nil
	}

	// BUG FIX: the project_id filter was missing, so the blobs were being
	// detached from every project instead of only the requested one.
	sql := fmt.Sprintf(`DELETE FROM project_blob WHERE project_id = ? AND blob_id IN (%s)`, ParamPlaceholderForIn(len(blobIDs)))

	params := make([]interface{}, 0, len(blobIDs)+1)
	params = append(params, projectID)
	params = append(params, blobIDs...)

	_, err := GetOrmer().Raw(sql, params...).Exec()
	return err
}
// HasBlobInProject reports whether the project contains a blob with the
// given digest.
func HasBlobInProject(projectID int64, digest string) (bool, error) {
	sql := `SELECT COUNT(*) FROM project_blob JOIN blob ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?`

	var total int64
	err := GetOrmer().Raw(sql, projectID, digest).QueryRow(&total)
	if err != nil {
		return false, err
	}

	return total > 0, nil
}
// GetBlobsNotInProject returns the blobs whose digests appear in blobDigests
// but which are not yet associated with the given project. A call with no
// digests returns (nil, nil).
func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blob, error) {
	if len(blobDigests) == 0 {
		return nil, nil
	}

	sql := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) AND digest IN (%s)",
		ParamPlaceholderForIn(len(blobDigests)))

	args := make([]interface{}, 0, len(blobDigests)+1)
	args = append(args, projectID)
	for _, d := range blobDigests {
		args = append(args, d)
	}

	var result []*models.Blob
	if _, err := GetOrmer().Raw(sql, args...).QueryRows(&result); err != nil {
		return nil, err
	}
	return result, nil
}
// CountSizeOfProject ...
func CountSizeOfProject(pid int64) (int64, error) {
var blobs []models.Blob
_, err := GetOrmer().Raw(`SELECT bb.id, bb.digest, bb.content_type, bb.size, bb.creation_time FROM project_blob pb LEFT JOIN blob bb ON pb.blob_id = bb.id WHERE pb.project_id = ? `, pid).QueryRows(&blobs)
if err != nil {
return 0, err
}
var size int64
for _, blob := range blobs {
size += blob.Size
}
return size, err
}

View File

@ -0,0 +1,68 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestHasBlobInProject verifies that a blob added to a project is reported
// as present by HasBlobInProject.
func TestHasBlobInProject(t *testing.T) {
	// create a blob with a random digest so the test does not collide with
	// previous runs
	_, blob, err := GetOrCreateBlob(&models.Blob{
		Digest: digest.FromString(utils.GenerateRandomString()).String(),
		Size:   100,
	})
	require.Nil(t, err)

	_, err = AddBlobToProject(blob.ID, 1)
	require.Nil(t, err)

	found, err := HasBlobInProject(1, blob.Digest)
	require.Nil(t, err)
	assert.True(t, found)
}
// TestCountSizeOfProject verifies that CountSizeOfProject sums the sizes of
// all blobs associated with a project (101 + 202 = 303).
func TestCountSizeOfProject(t *testing.T) {
	id1, err := AddBlob(&models.Blob{
		Digest: "CountSizeOfProject_blob1",
		Size:   101,
	})
	require.Nil(t, err)
	id2, err := AddBlob(&models.Blob{
		Digest: "CountSizeOfProject_blob2",
		Size:   202,
	})
	require.Nil(t, err)
	pid1, err := AddProject(models.Project{
		Name:    "CountSizeOfProject_project1",
		OwnerID: 1,
	})
	require.Nil(t, err)
	_, err = AddBlobToProject(id1, pid1)
	require.Nil(t, err)
	_, err = AddBlobToProject(id2, pid1)
	require.Nil(t, err)
	pSize, err := CountSizeOfProject(pid1)
	// previously the error was never checked
	require.Nil(t, err)
	// testify's Equal takes (t, expected, actual); the arguments were swapped
	assert.Equal(t, int64(303), pSize)
}

View File

@ -193,7 +193,7 @@ func quotaQueryConditions(query ...*models.QuotaQuery) (string, []interface{}) {
}
if len(q.ReferenceIDs) != 0 {
sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, paramPlaceholder(len(q.ReferenceIDs)))
sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs)))
params = append(params, q.ReferenceIDs)
}

View File

@ -111,7 +111,7 @@ func quotaUsageQueryConditions(query ...*models.QuotaUsageQuery) (string, []inte
params = append(params, q.ReferenceID)
}
if len(q.ReferenceIDs) != 0 {
sql += fmt.Sprintf(`and reference_id in (%s) `, paramPlaceholder(len(q.ReferenceIDs)))
sql += fmt.Sprintf(`and reference_id in (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs)))
params = append(params, q.ReferenceIDs)
}

View File

@ -178,7 +178,7 @@ func repositoryQueryConditions(query ...*models.RepositoryQuery) (string, []inte
if len(q.ProjectIDs) > 0 {
sql += fmt.Sprintf(`and r.project_id in ( %s ) `,
paramPlaceholder(len(q.ProjectIDs)))
ParamPlaceholderForIn(len(q.ProjectIDs)))
params = append(params, q.ProjectIDs)
}

View File

@ -40,6 +40,7 @@ func init() {
new(NotificationPolicy),
new(NotificationJob),
new(Blob),
new(ProjectBlob),
new(Artifact),
new(ArtifactAndBlob),
new(CVEWhitelist),

View File

@ -45,12 +45,14 @@ type SQLite struct {
// PostGreSQL ...
type PostGreSQL struct {
Host string `json:"host"`
Port int `json:"port"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
Database string `json:"database"`
SSLMode string `json:"sslmode"`
Host string `json:"host"`
Port int `json:"port"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
Database string `json:"database"`
SSLMode string `json:"sslmode"`
MaxIdleConns int `json:"max_idle_conns"`
MaxOpenConns int `json:"max_open_conns"`
}
// Email ...

View File

@ -12,17 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package util
package models
import (
"net/http"
"time"
)
// RegInterceptor ...
type RegInterceptor interface {
// HandleRequest ...
HandleRequest(req *http.Request) error
// HandleResponse won't return any error
HandleResponse(rw CustomResponseWriter, req *http.Request)
// ProjectBlob holds the relationship between a project and a blob
// (the original comment said "manifest and blob", but the columns are
// project_id and blob_id).
type ProjectBlob struct {
	ID        int64 `orm:"pk;auto;column(id)" json:"id"`
	ProjectID int64 `orm:"column(project_id)" json:"project_id"`
	BlobID    int64 `orm:"column(blob_id)" json:"blob_id"`
	// CreationTime is set automatically by the ORM on insert (auto_now_add)
	CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
}
// TableName returns the database table this model is mapped to.
func (*ProjectBlob) TableName() string {
	return "project_blob"
}

View File

@ -176,16 +176,63 @@ func (m *Manager) DeleteQuota() error {
// UpdateQuota update the quota resource spec
func (m *Manager) UpdateQuota(hardLimits types.ResourceList) error {
o := dao.GetOrmer()
if err := m.driver.Validate(hardLimits); err != nil {
return err
}
sql := `UPDATE quota SET hard = ? WHERE reference = ? AND reference_id = ?`
_, err := dao.GetOrmer().Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec()
_, err := o.Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec()
return err
}
// EnsureQuota ensures the reference has quota and usage records:
// if non-existent, it creates a new quota (with the driver's default hard
// limits) and usage; if existent and the stored usage differs from the
// given one, it updates the usage inside a transaction.
func (m *Manager) EnsureQuota(usages types.ResourceList) error {
	query := &models.QuotaQuery{
		Reference:   m.reference,
		ReferenceID: m.referenceID,
	}
	quotas, err := dao.ListQuotas(query)
	if err != nil {
		return err
	}

	// non-existent: create quota and usage
	if len(quotas) == 0 {
		defaultHardLimit := m.driver.HardLimits()
		if _, err := m.NewQuota(defaultHardLimit, usages); err != nil {
			return err
		}
		return nil
	}

	// existent: nothing to do when the stored usage already matches
	used := usages
	quotaUsed, err := types.NewResourceList(quotas[0].Used)
	if err != nil {
		// previously ignored, which made the comparison below run against a
		// zero-valued resource list on parse failures
		return err
	}
	if types.Equals(quotaUsed, used) {
		return nil
	}

	// previously the transaction's error was dropped and nil was always
	// returned; propagate it to the caller instead
	return dao.WithTransaction(func(o orm.Ormer) error {
		usage, err := m.getUsageForUpdate(o)
		if err != nil {
			return err
		}
		usage.Used = used.String()
		usage.UpdateTime = time.Now()
		if _, err := o.Update(usage); err != nil {
			return err
		}
		return nil
	})
}
// AddResources add resources to usage
func (m *Manager) AddResources(resources types.ResourceList) error {
return dao.WithTransaction(func(o orm.Ormer) error {

View File

@ -21,6 +21,7 @@ import (
"testing"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota/driver"
"github.com/goharbor/harbor/src/common/quota/driver/mocks"
"github.com/goharbor/harbor/src/pkg/types"
@ -131,6 +132,48 @@ func (suite *ManagerSuite) TestUpdateQuota() {
}
}
// TestEnsureQuota covers both branches of EnsureQuota: creation of quota and
// usage for a reference without records, and update of the usage for an
// existing reference (hard limits must remain untouched).
func (suite *ManagerSuite) TestEnsureQuota() {
	// non-existent: EnsureQuota creates the quota with infinite hard limits
	nonExistRefID := "3"
	mgr := suite.quotaManager(nonExistRefID)
	infinite := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}
	usage := types.ResourceList{types.ResourceCount: 10, types.ResourceStorage: 10}
	err := mgr.EnsureQuota(usage)
	suite.Nil(err)
	query := &models.QuotaQuery{
		Reference:   reference,
		ReferenceID: nonExistRefID,
	}
	quotas, err := dao.ListQuotas(query)
	suite.Nil(err)
	suite.Equal(usage, mustResourceList(quotas[0].Used))
	suite.Equal(infinite, mustResourceList(quotas[0].Hard))

	// existent: EnsureQuota only updates the usage
	existRefID := "4"
	mgr = suite.quotaManager(existRefID)
	used := types.ResourceList{types.ResourceCount: 11, types.ResourceStorage: 11}
	if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) {
		quota, _ := dao.GetQuota(id)
		suite.Equal(hardLimits, mustResourceList(quota.Hard))

		usage, _ := dao.GetQuotaUsage(id)
		suite.Equal(used, mustResourceList(usage.Used))
	}

	usage2 := types.ResourceList{types.ResourceCount: 12, types.ResourceStorage: 12}
	err = mgr.EnsureQuota(usage2)
	suite.Nil(err)
	query2 := &models.QuotaQuery{
		Reference:   reference,
		ReferenceID: existRefID,
	}
	quotas2, err := dao.ListQuotas(query2)
	// previously this error was never checked
	suite.Nil(err)
	suite.Equal(usage2, mustResourceList(quotas2[0].Used))
	suite.Equal(hardLimits, mustResourceList(quotas2[0].Hard))
}
func (suite *ManagerSuite) TestQuotaAutoCreation() {
for i := 0; i < 10; i++ {
mgr := suite.quotaManager(fmt.Sprintf("%d", i))

View File

@ -25,11 +25,9 @@ import (
"sort"
"strconv"
"strings"
// "time"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils"
)
@ -407,6 +405,7 @@ func (r *Repository) monolithicBlobUpload(location, digest string, size int64, d
if err != nil {
return err
}
req.ContentLength = size
resp, err := r.client.Do(req)
if err != nil {

View File

@ -202,6 +202,8 @@ func init() {
beego.Router("/api/quotas", quotaAPIType, "get:List")
beego.Router("/api/quotas/:id([0-9]+)", quotaAPIType, "get:Get;put:Put")
beego.Router("/api/internal/switchquota", &InternalAPI{}, "put:SwitchQuota")
// syncRegistry
if err := SyncRegistry(config.GlobalProjectMgr); err != nil {
log.Fatalf("failed to sync repositories from registry: %v", err)

View File

@ -15,12 +15,16 @@
package api
import (
"errors"
"fmt"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/pkg/errors"
"strconv"
)
// InternalAPI handles request of harbor admin...
@ -69,3 +73,78 @@ func (ia *InternalAPI) RenameAdmin() {
log.Debugf("The super user has been renamed to: %s", newName)
ia.DestroySession()
}
// QuotaSwitcher is the JSON request payload of the SwitchQuota API,
// carrying the desired on/off state of the per-project quota feature.
type QuotaSwitcher struct {
	// Enabled indicates whether quota per project should be turned on.
	Enabled bool
}
// SwitchQuota turns the per-project quota feature on or off.
// When switching from disabled to enabled the quota usage of every project
// is first recalculated from the DB records, then the setting is persisted.
func (ia *InternalAPI) SwitchQuota() {
	var req QuotaSwitcher
	if err := ia.DecodeJSONReq(&req); err != nil {
		ia.SendBadRequestError(err)
		return
	}
	// quota per project from disable to enable, it needs to update the quota
	// usage based on the DB records before the switch takes effect
	if !config.QuotaPerProjectEnable() && req.Enabled {
		if err := ia.ensureQuota(); err != nil {
			ia.SendInternalServerError(err)
			return
		}
	}
	// the defer in the previous version was registered after all early
	// returns, so plain calls at the end are equivalent
	config.GetCfgManager().Set(common.QuotaPerProjectEnable, req.Enabled)
	if err := config.GetCfgManager().Save(); err != nil {
		// previously ignored: a failed save meant the API reported success
		// while the setting was never persisted
		ia.SendInternalServerError(err)
		return
	}
}
// ensureQuota recalculates and stores the quota usage (storage size and
// artifact/chart count) for every project. Per-project failures are logged
// and skipped so one bad project does not abort the whole recalculation;
// only the initial project listing can fail the call.
func (ia *InternalAPI) ensureQuota() error {
	projects, err := dao.GetProjects(nil)
	if err != nil {
		return err
	}
	for _, project := range projects {
		// storage usage: total size of the project's blobs
		pSize, err := dao.CountSizeOfProject(project.ProjectID)
		if err != nil {
			logger.Warningf("error happen on counting size of project:%d , error:%v, just skip it.", project.ProjectID, err)
			continue
		}
		// count usage: number of artifacts in the project
		afQuery := &models.ArtifactQuery{
			PID: project.ProjectID,
		}
		afs, err := dao.ListArtifacts(afQuery)
		if err != nil {
			logger.Warningf("error happen on counting number of project:%d , error:%v, just skip it.", project.ProjectID, err)
			continue
		}
		pCount := int64(len(afs))
		// it needs to append the chart count when chartmuseum is deployed
		if config.WithChartMuseum() {
			count, err := chartController.GetCountOfCharts([]string{project.Name})
			if err != nil {
				err = errors.Wrap(err, fmt.Sprintf("get chart count of project %d failed", project.ProjectID))
				logger.Error(err)
				continue
			}
			pCount = pCount + int64(count)
		}
		quotaMgr, err := quota.NewManager("project", strconv.FormatInt(project.ProjectID, 10))
		if err != nil {
			logger.Errorf("Error occurred when to new quota manager %v, just skip it.", err)
			continue
		}
		used := quota.ResourceList{
			quota.ResourceStorage: pSize,
			quota.ResourceCount:   pCount,
		}
		// persist the recalculated usage (creates the quota if missing)
		if err := quotaMgr.EnsureQuota(used); err != nil {
			logger.Errorf("cannot ensure quota for the project: %d, err: %v, just skip it.", project.ProjectID, err)
			continue
		}
	}
	return nil
}

View File

@ -0,0 +1,56 @@
// Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"net/http"
"testing"
)
// cannot verify the real scenario here; this only checks the access-control
// behavior of PUT /api/internal/switchquota for the three caller classes.
// NOTE(review): the cases run in order and the 200 case toggles the switch —
// keep the ordering as-is.
func TestSwitchQuota(t *testing.T) {
	cases := []*codeCheckingCase{
		// 401: anonymous caller is rejected
		{
			request: &testingRequest{
				method: http.MethodPut,
				url:    "/api/internal/switchquota",
			},
			code: http.StatusUnauthorized,
		},
		// 200: a system admin may toggle the switch
		{
			request: &testingRequest{
				method:     http.MethodPut,
				url:        "/api/internal/switchquota",
				credential: sysAdmin,
				bodyJSON: &QuotaSwitcher{
					Enabled: true,
				},
			},
			code: http.StatusOK,
		},
		// 403: an authenticated non-admin is forbidden
		{
			request: &testingRequest{
				url:        "/api/internal/switchquota",
				method:     http.MethodPut,
				credential: nonSysAdmin,
			},
			code: http.StatusForbidden,
		},
	}
	runCodeCheckingCases(t, cases...)
}

View File

@ -139,23 +139,26 @@ func (p *ProjectAPI) Post() {
return
}
setting, err := config.QuotaSetting()
if err != nil {
log.Errorf("failed to get quota setting: %v", err)
p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err))
return
}
var hardLimits types.ResourceList
if config.QuotaPerProjectEnable() {
setting, err := config.QuotaSetting()
if err != nil {
log.Errorf("failed to get quota setting: %v", err)
p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err))
return
}
if !p.SecurityCtx.IsSysAdmin() {
pro.CountLimit = &setting.CountPerProject
pro.StorageLimit = &setting.StoragePerProject
}
if !p.SecurityCtx.IsSysAdmin() {
pro.CountLimit = &setting.CountPerProject
pro.StorageLimit = &setting.StoragePerProject
}
hardLimits, err := projectQuotaHardLimits(pro, setting)
if err != nil {
log.Errorf("Invalid project request, error: %v", err)
p.SendBadRequestError(fmt.Errorf("invalid request: %v", err))
return
hardLimits, err = projectQuotaHardLimits(pro, setting)
if err != nil {
log.Errorf("Invalid project request, error: %v", err)
p.SendBadRequestError(fmt.Errorf("invalid request: %v", err))
return
}
}
exist, err := p.ProjectMgr.Exists(pro.Name)
@ -212,14 +215,16 @@ func (p *ProjectAPI) Post() {
return
}
quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
if err != nil {
p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
return
}
if _, err := quotaMgr.NewQuota(hardLimits); err != nil {
p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err))
return
if config.QuotaPerProjectEnable() {
quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
if err != nil {
p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
return
}
if _, err := quotaMgr.NewQuota(hardLimits); err != nil {
p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err))
return
}
}
go func() {
@ -653,6 +658,11 @@ func projectQuotaHardLimits(req *models.ProjectRequest, setting *models.QuotaSet
}
func getProjectQuotaSummary(projectID int64, summary *models.ProjectSummary) {
if !config.QuotaPerProjectEnable() {
log.Debug("Quota per project disabled")
return
}
quotas, err := dao.ListQuotas(&models.QuotaQuery{Reference: "project", ReferenceID: strconv.FormatInt(projectID, 10)})
if err != nil {
log.Debugf("failed to get quota for project: %d", projectID)

View File

@ -595,11 +595,31 @@ func (ra *RepositoryAPI) GetTags() {
tags = ts
}
detail, err := ra.GetBool("detail", true)
if !detail && err == nil {
ra.Data["json"] = simpleTags(tags)
ra.ServeJSON()
return
}
ra.Data["json"] = assembleTagsInParallel(client, repoName, tags,
ra.SecurityCtx.GetUsername())
ra.ServeJSON()
}
// simpleTags wraps each tag name in a TagResp that carries only the name,
// with no detail information attached. An empty input yields a nil slice,
// matching the previous behavior (marshals as JSON null, not []).
func simpleTags(tags []string) []*models.TagResp {
	var result []*models.TagResp
	for _, name := range tags {
		detail := models.TagDetail{Name: name}
		result = append(result, &models.TagResp{TagDetail: detail})
	}
	return result
}
// get config, signature and scan overview and assemble them into one
// struct for each tag in tags
func assembleTagsInParallel(client *registry.Repository, repository string,

View File

@ -1,6 +1,7 @@
package api
import (
"encoding/json"
"errors"
"fmt"
"net/http"
@ -41,30 +42,6 @@ func (r *RetentionAPI) GetMetadatas() {
data := `
{
"templates": [
{
"rule_template": "lastXDays",
"display_text": "the images from the last # days",
"action": "retain",
"params": [
{
"type": "int",
"unit": "DAYS",
"required": true
}
]
},
{
"rule_template": "latestActiveK",
"display_text": "the most recent active # images",
"action": "retain",
"params": [
{
"type": "int",
"unit": "COUNT",
"required": true
}
]
},
{
"rule_template": "latestPushedK",
"display_text": "the most recently pushed # images",
@ -90,25 +67,7 @@ func (r *RetentionAPI) GetMetadatas() {
]
},
{
"rule_template": "nothing",
"display_text": "none",
"action": "retain",
"params": []
},
{
"rule_template": "always",
"display_text": "always",
"action": "retain",
"params": [
{
"type": "int",
"unit": "COUNT",
"required": true
}
]
},
{
"rule_template": "dayspl",
"rule_template": "nDaysSinceLastPull",
"display_text": "pulled within the last # days",
"action": "retain",
"params": [
@ -120,7 +79,7 @@ func (r *RetentionAPI) GetMetadatas() {
]
},
{
"rule_template": "daysps",
"rule_template": "nDaysSinceLastPush",
"display_text": "pushed within the last # days",
"action": "retain",
"params": [
@ -130,7 +89,19 @@ func (r *RetentionAPI) GetMetadatas() {
"required": true
}
]
}
},
{
"rule_template": "nothing",
"display_text": "none",
"action": "retain",
"params": []
},
{
"rule_template": "always",
"display_text": "always",
"action": "retain",
"params": []
}
],
"scope_selectors": [
{
@ -194,6 +165,10 @@ func (r *RetentionAPI) CreateRetention() {
r.SendBadRequestError(err)
return
}
if err = r.checkRuleConflict(p); err != nil {
r.SendConflictError(err)
return
}
if !r.requireAccess(p, rbac.ActionCreate) {
return
}
@ -241,6 +216,10 @@ func (r *RetentionAPI) UpdateRetention() {
return
}
p.ID = id
if err = r.checkRuleConflict(p); err != nil {
r.SendConflictError(err)
return
}
if !r.requireAccess(p, rbac.ActionUpdate) {
return
}
@ -250,6 +229,21 @@ func (r *RetentionAPI) UpdateRetention() {
}
}
// checkRuleConflict returns an error when the policy contains two rules that
// are identical apart from their IDs; the error names the conflicting rule
// index and the stored ID of its earlier duplicate.
func (r *RetentionAPI) checkRuleConflict(p *policy.Metadata) error {
	seen := make(map[string]int)
	for n, rule := range p.Rules {
		tid := rule.ID
		// zero the ID so equality is judged on the rule's content only;
		// 'rule' is a copy of the slice element, so p.Rules is untouched
		rule.ID = 0
		bs, err := json.Marshal(rule)
		if err != nil {
			// previously swallowed (bs, _ :=), which would make every rule
			// collide on the empty key if marshalling ever failed
			return err
		}
		if old, exists := seen[string(bs)]; exists {
			return fmt.Errorf("rule %d is conflict with rule %d", n, old)
		}
		seen[string(bs)] = tid
	}
	return nil
}
// TriggerRetentionExec Trigger Retention Execution
func (r *RetentionAPI) TriggerRetentionExec() {
id, err := r.GetIDFromURL()

View File

@ -143,6 +143,87 @@ func TestCreatePolicy(t *testing.T) {
},
code: http.StatusBadRequest,
},
{
request: &testingRequest{
method: http.MethodPost,
url: "/api/retentions",
bodyJSON: &policy.Metadata{
Algorithm: "or",
Rules: []rule.Metadata{
{
ID: 1,
Priority: 1,
Template: "recentXdays",
Parameters: rule.Parameters{
"num": 10,
},
TagSelectors: []*rule.Selector{
{
Kind: "label",
Decoration: "with",
Pattern: "latest",
},
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: "release-[\\d\\.]+",
},
},
ScopeSelectors: map[string][]*rule.Selector{
"repository": {
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: ".+",
},
},
},
},
{
ID: 2,
Priority: 1,
Template: "recentXdays",
Parameters: rule.Parameters{
"num": 10,
},
TagSelectors: []*rule.Selector{
{
Kind: "label",
Decoration: "with",
Pattern: "latest",
},
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: "release-[\\d\\.]+",
},
},
ScopeSelectors: map[string][]*rule.Selector{
"repository": {
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: ".+",
},
},
},
},
},
Trigger: &policy.Trigger{
Kind: "Schedule",
Settings: map[string]interface{}{
"cron": "* 22 11 * * *",
},
},
Scope: &policy.Scope{
Level: "project",
Reference: 1,
},
},
credential: sysAdmin,
},
code: http.StatusConflict,
},
}
runCodeCheckingCases(t, cases...)
@ -267,6 +348,87 @@ func TestPolicy(t *testing.T) {
},
code: http.StatusOK,
},
{
request: &testingRequest{
method: http.MethodPut,
url: fmt.Sprintf("/api/retentions/%d", id),
bodyJSON: &policy.Metadata{
Algorithm: "or",
Rules: []rule.Metadata{
{
ID: 1,
Priority: 1,
Template: "recentXdays",
Parameters: rule.Parameters{
"num": 10,
},
TagSelectors: []*rule.Selector{
{
Kind: "label",
Decoration: "with",
Pattern: "latest",
},
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: "release-[\\d\\.]+",
},
},
ScopeSelectors: map[string][]*rule.Selector{
"repository": {
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: "b.+",
},
},
},
},
{
ID: 2,
Priority: 1,
Template: "recentXdays",
Parameters: rule.Parameters{
"num": 10,
},
TagSelectors: []*rule.Selector{
{
Kind: "label",
Decoration: "with",
Pattern: "latest",
},
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: "release-[\\d\\.]+",
},
},
ScopeSelectors: map[string][]*rule.Selector{
"repository": {
{
Kind: "regularExpression",
Decoration: "matches",
Pattern: "b.+",
},
},
},
},
},
Trigger: &policy.Trigger{
Kind: "Schedule",
Settings: map[string]interface{}{
"cron": "* 22 11 * * *",
},
},
Scope: &policy.Scope{
Level: "project",
Reference: 1,
},
},
credential: sysAdmin,
},
code: http.StatusConflict,
},
{
request: &testingRequest{
method: http.MethodPost,

View File

@ -331,12 +331,14 @@ func Database() (*models.Database, error) {
database := &models.Database{}
database.Type = cfgMgr.Get(common.DatabaseType).GetString()
postgresql := &models.PostGreSQL{
Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(),
Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(),
Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(),
Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(),
Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(),
Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(),
Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(),
Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(),
Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(),
Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(),
MaxIdleConns: cfgMgr.Get(common.PostGreSQLMaxIdleConns).GetInt(),
MaxOpenConns: cfgMgr.Get(common.PostGreSQLMaxOpenConns).GetInt(),
}
database.PostGreSQL = postgresql
@ -520,6 +522,11 @@ func NotificationEnable() bool {
return cfgMgr.Get(common.NotificationEnable).GetBool()
}
// QuotaPerProjectEnable returns a bool to indicates if quota per project enabled in harbor
func QuotaPerProjectEnable() bool {
return cfgMgr.Get(common.QuotaPerProjectEnable).GetBool()
}
// QuotaSetting returns the setting of quota.
func QuotaSetting() (*models.QuotaSetting, error) {
if err := cfgMgr.Load(); err != nil {

View File

@ -15,12 +15,13 @@
package chart
import (
"fmt"
"net/http"
"regexp"
"strconv"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
"github.com/goharbor/harbor/src/core/middlewares/util"
@ -29,81 +30,82 @@ import (
var (
deleteChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/(?P<name>\w+)/(?P<version>[\w\d\.]+)/?$`)
uploadChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/?$`)
createChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/?$`)
)
var (
defaultBuilders = []interceptor.Builder{
&deleteChartVersionBuilder{},
&uploadChartVersionBuilder{},
&chartVersionDeletionBuilder{},
&chartVersionCreationBuilder{},
}
)
type deleteChartVersionBuilder struct {
}
type chartVersionDeletionBuilder struct{}
func (*deleteChartVersionBuilder) Build(req *http.Request) interceptor.Interceptor {
func (*chartVersionDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if req.Method != http.MethodDelete {
return nil
return nil, nil
}
matches := deleteChartVersionRe.FindStringSubmatch(req.URL.String())
if len(matches) <= 1 {
return nil
return nil, nil
}
namespace, chartName, version := matches[1], matches[2], matches[3]
project, err := dao.GetProjectByName(namespace)
if err != nil {
log.Errorf("Failed to get project %s, error: %v", namespace, err)
return nil
return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err)
}
if project == nil {
log.Warningf("Project %s not found", namespace)
return nil
return nil, fmt.Errorf("project %s not found", namespace)
}
info := &util.ChartVersionInfo{
ProjectID: project.ProjectID,
Namespace: namespace,
ChartName: chartName,
Version: version,
}
opts := []quota.Option{
quota.EnforceResources(config.QuotaPerProjectEnable()),
quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
quota.WithAction(quota.SubtractAction),
quota.StatusCode(http.StatusOK),
quota.MutexKeys(mutexKey(namespace, chartName, version)),
quota.MutexKeys(info.MutexKey()),
quota.Resources(types.ResourceList{types.ResourceCount: 1}),
}
return quota.New(opts...)
return quota.New(opts...), nil
}
type uploadChartVersionBuilder struct {
}
type chartVersionCreationBuilder struct{}
func (*uploadChartVersionBuilder) Build(req *http.Request) interceptor.Interceptor {
func (*chartVersionCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if req.Method != http.MethodPost {
return nil
return nil, nil
}
matches := uploadChartVersionRe.FindStringSubmatch(req.URL.String())
matches := createChartVersionRe.FindStringSubmatch(req.URL.String())
if len(matches) <= 1 {
return nil
return nil, nil
}
namespace := matches[1]
project, err := dao.GetProjectByName(namespace)
if err != nil {
log.Errorf("Failed to get project %s, error: %v", namespace, err)
return nil
return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err)
}
if project == nil {
log.Warningf("Project %s not found", namespace)
return nil
return nil, fmt.Errorf("project %s not found", namespace)
}
chart, err := parseChart(req)
if err != nil {
log.Errorf("Failed to parse chart from body, error: %v", err)
return nil
return nil, fmt.Errorf("failed to parse chart from body, error: %v", err)
}
chartName, version := chart.Metadata.Name, chart.Metadata.Version
@ -117,12 +119,13 @@ func (*uploadChartVersionBuilder) Build(req *http.Request) interceptor.Intercept
*req = *req.WithContext(util.NewChartVersionInfoContext(req.Context(), info))
opts := []quota.Option{
quota.EnforceResources(config.QuotaPerProjectEnable()),
quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated),
quota.MutexKeys(mutexKey(namespace, chartName, version)),
quota.OnResources(computeQuotaForUpload),
quota.MutexKeys(info.MutexKey()),
quota.OnResources(computeResourcesForChartVersionCreation),
}
return quota.New(opts...)
return quota.New(opts...), nil
}

View File

@ -42,7 +42,13 @@ func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
// ServeHTTP manifest ...
func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor := h.getInterceptor(req)
interceptor, err := h.getInterceptor(req)
if err != nil {
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in chart count quota handler: %v", err)),
http.StatusInternalServerError)
return
}
if interceptor == nil {
h.next.ServeHTTP(rw, req)
return
@ -61,13 +67,17 @@ func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor.HandleResponse(w, req)
}
func (h *chartHandler) getInterceptor(req *http.Request) interceptor.Interceptor {
func (h *chartHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
for _, builder := range h.builders {
interceptor := builder.Build(req)
interceptor, err := builder.Build(req)
if err != nil {
return nil, err
}
if interceptor != nil {
return interceptor
return interceptor, nil
}
}
return nil
return nil, nil
}

View File

@ -85,7 +85,9 @@ func chartVersionExists(namespace, chartName, version string) bool {
return !chartVersion.Removed
}
func computeQuotaForUpload(req *http.Request) (types.ResourceList, error) {
// computeResourcesForChartVersionCreation returns count resource required for the chart package
// no count required if the chart package of version exists in project
func computeResourcesForChartVersionCreation(req *http.Request) (types.ResourceList, error) {
info, ok := util.ChartVersionInfoFromContext(req.Context())
if !ok {
return nil, errors.New("chart version info missing")
@ -99,10 +101,6 @@ func computeQuotaForUpload(req *http.Request) (types.ResourceList, error) {
return types.ResourceList{types.ResourceCount: 1}, nil
}
func mutexKey(str ...string) string {
return "chart:" + strings.Join(str, ":")
}
func parseChart(req *http.Request) (*chart.Chart, error) {
chartFile, _, err := req.FormFile(formFieldNameForChart)
if err != nil {

View File

@ -18,178 +18,83 @@ import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/opencontainers/go-digest"
)
var (
defaultBuilders = []interceptor.Builder{
&deleteManifestBuilder{},
&putManifestBuilder{},
&manifestDeletionBuilder{},
&manifestCreationBuilder{},
}
)
type deleteManifestBuilder struct {
}
type manifestDeletionBuilder struct{}
func (*deleteManifestBuilder) Build(req *http.Request) interceptor.Interceptor {
if req.Method != http.MethodDelete {
return nil
func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchDeleteManifest(req); !match {
return nil, nil
}
match, name, reference := util.MatchManifestURL(req)
if !match {
return nil
}
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
var err error
info, err = util.ParseManifestInfoFromPath(req)
if err != nil {
return nil, fmt.Errorf("failed to parse manifest, error %v", err)
}
dgt, err := digest.Parse(reference)
if err != nil {
// Delete manifest only accept digest as reference
return nil
// Manifest info will be used by computeResourcesForDeleteManifest
*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
}
projectName := strings.Split(name, "/")[0]
project, err := dao.GetProjectByName(projectName)
if err != nil {
log.Errorf("Failed to get project %s, error: %v", projectName, err)
return nil
}
if project == nil {
log.Warningf("Project %s not found", projectName)
return nil
}
info := &util.MfInfo{
ProjectID: project.ProjectID,
Repository: name,
Digest: dgt.String(),
}
// Manifest info will be used by computeQuotaForUpload
*req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info))
opts := []quota.Option{
quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
quota.EnforceResources(config.QuotaPerProjectEnable()),
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.SubtractAction),
quota.StatusCode(http.StatusAccepted),
quota.MutexKeys(mutexKey(info)),
quota.OnResources(computeQuotaForDelete),
quota.MutexKeys(info.MutexKey("count")),
quota.OnResources(computeResourcesForManifestDeletion),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
return dao.DeleteArtifactByDigest(info.ProjectID, info.Repository, info.Digest)
}),
}
return quota.New(opts...)
return quota.New(opts...), nil
}
type putManifestBuilder struct {
}
type manifestCreationBuilder struct{}
func (b *putManifestBuilder) Build(req *http.Request) interceptor.Interceptor {
if req.Method != http.MethodPut {
return nil
func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchPushManifest(req); !match {
return nil, nil
}
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
// assert that manifest info will be set by others
return nil
var err error
info, err = util.ParseManifestInfo(req)
if err != nil {
return nil, fmt.Errorf("failed to parse manifest, error %v", err)
}
// Manifest info will be used by computeResourcesForCreateManifest
*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
}
opts := []quota.Option{
quota.EnforceResources(config.QuotaPerProjectEnable()),
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated),
quota.MutexKeys(mutexKey(info)),
quota.OnResources(computeQuotaForPut),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
newManifest, overwriteTag := !info.Exist, info.DigestChanged
if newManifest {
if err := b.doNewManifest(info); err != nil {
log.Errorf("Failed to handle response for new manifest, error: %v", err)
}
} else if overwriteTag {
if err := b.doOverwriteTag(info); err != nil {
log.Errorf("Failed to handle response for overwrite tag, error: %v", err)
}
}
return nil
}),
quota.MutexKeys(info.MutexKey("count")),
quota.OnResources(computeResourcesForManifestCreation),
quota.OnFulfilled(afterManifestCreated),
}
return quota.New(opts...)
}
func (b *putManifestBuilder) doNewManifest(info *util.MfInfo) error {
artifact := &models.Artifact{
PID: info.ProjectID,
Repo: info.Repository,
Tag: info.Tag,
Digest: info.Digest,
PushTime: time.Now(),
Kind: "Docker-Image",
}
if _, err := dao.AddArtifact(artifact); err != nil {
return fmt.Errorf("error to add artifact, %v", err)
}
return b.attachBlobsToArtifact(info)
}
func (b *putManifestBuilder) doOverwriteTag(info *util.MfInfo) error {
artifact := &models.Artifact{
ID: info.ArtifactID,
PID: info.ProjectID,
Repo: info.Repository,
Tag: info.Tag,
Digest: info.Digest,
PushTime: time.Now(),
Kind: "Docker-Image",
}
if err := dao.UpdateArtifactDigest(artifact); err != nil {
return fmt.Errorf("error to update artifact, %v", err)
}
return b.attachBlobsToArtifact(info)
}
func (b *putManifestBuilder) attachBlobsToArtifact(info *util.MfInfo) error {
self := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: info.Digest,
}
artifactBlobs := append([]*models.ArtifactAndBlob{}, self)
for _, d := range info.Refrerence {
artifactBlob := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: d.Digest.String(),
}
artifactBlobs = append(artifactBlobs, artifactBlob)
}
if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil {
if strings.Contains(err.Error(), dao.ErrDupRows.Error()) {
log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag")
return nil
}
return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err)
}
return nil
return quota.New(opts...), nil
}

View File

@ -42,7 +42,14 @@ func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
// ServeHTTP manifest ...
func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor := h.getInterceptor(req)
interceptor, err := h.getInterceptor(req)
if err != nil {
log.Warningf("Error occurred when to handle request in count quota handler: %v", err)
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in count quota handler: %v", err)),
http.StatusInternalServerError)
return
}
if interceptor == nil {
h.next.ServeHTTP(rw, req)
return
@ -60,13 +67,17 @@ func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request)
interceptor.HandleResponse(rw, req)
}
func (h *countQuotaHandler) getInterceptor(req *http.Request) interceptor.Interceptor {
func (h *countQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
for _, builder := range h.builders {
interceptor := builder.Build(req)
interceptor, err := builder.Build(req)
if err != nil {
return nil, err
}
if interceptor != nil {
return interceptor
return interceptor, nil
}
}
return nil
return nil, nil
}

View File

@ -26,6 +26,7 @@ import (
"github.com/docker/distribution"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/opencontainers/go-digest"
@ -67,7 +68,7 @@ func doDeleteManifestRequest(projectID int64, projectName, name, dgt string, nex
url := fmt.Sprintf("/v2/%s/manifests/%s", repository, dgt)
req, _ := http.NewRequest("DELETE", url, nil)
ctx := util.NewManifestInfoContext(req.Context(), &util.MfInfo{
ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{
ProjectID: projectID,
Repository: repository,
Digest: dgt,
@ -96,12 +97,12 @@ func doPutManifestRequest(projectID int64, projectName, name, tag, dgt string, n
url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag)
req, _ := http.NewRequest("PUT", url, nil)
ctx := util.NewManifestInfoContext(req.Context(), &util.MfInfo{
ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{
ProjectID: projectID,
Repository: repository,
Tag: tag,
Digest: dgt,
Refrerence: []distribution.Descriptor{
References: []distribution.Descriptor{
{Digest: digest.FromString(randomString(15))},
{Digest: digest.FromString(randomString(15))},
},
@ -146,11 +147,13 @@ func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) {
}
func (suite *HandlerSuite) TearDownTest() {
dao.ClearTable("artifact")
dao.ClearTable("blob")
dao.ClearTable("artifact_blob")
dao.ClearTable("quota")
dao.ClearTable("quota_usage")
for _, table := range []string{
"artifact", "blob",
"artifact_blob", "project_blob",
"quota", "quota_usage",
} {
dao.ClearTable(table)
}
}
func (suite *HandlerSuite) TestPutManifestCreated() {
@ -169,9 +172,6 @@ func (suite *HandlerSuite) TestPutManifestCreated() {
total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt})
suite.Nil(err)
suite.Equal(int64(1), total, "Artifact should be created")
if exists, err := dao.HasBlobInProject(projectID, dgt); suite.Nil(err) {
suite.True(exists)
}
// Push the photon:latest with photon:dev
code = doPutManifestRequest(projectID, projectName, "photon", "dev", dgt)
@ -213,9 +213,6 @@ func (suite *HandlerSuite) TestPutManifestFailed() {
total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt})
suite.Nil(err)
suite.Equal(int64(0), total, "Artifact should not be created")
if exists, err := dao.HasBlobInProject(projectID, dgt); suite.Nil(err) {
suite.False(exists)
}
}
func (suite *HandlerSuite) TestDeleteManifestAccepted() {
@ -258,7 +255,7 @@ func (suite *HandlerSuite) TestDeleteManifestFailed() {
suite.checkCountUsage(1, projectID)
}
func (suite *HandlerSuite) TestDeleteManifesInMultiProjects() {
func (suite *HandlerSuite) TestDeleteManifestInMultiProjects() {
projectName := randomString(5)
projectID := suite.addProject(projectName)
@ -294,6 +291,7 @@ func (suite *HandlerSuite) TestDeleteManifesInMultiProjects() {
}
func TestMain(m *testing.M) {
config.Init()
dao.PrepareTestForPostgresSQL()
if result := m.Run(); result != 0 {

View File

@ -18,23 +18,35 @@ import (
"errors"
"fmt"
"net/http"
"strings"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/goharbor/harbor/src/pkg/types"
)
func mutexKey(info *util.MfInfo) string {
if info.Tag != "" {
return "Quota::manifest-lock::" + info.Repository + ":" + info.Tag
// computeResourcesForManifestCreation returns count resource required for manifest
// no count required if the tag of the repository exists in the project
func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
}
return "Quota::manifest-lock::" + info.Repository + ":" + info.Digest
// only count quota required when push new tag
if info.IsNewTag() {
return quota.ResourceList{quota.ResourceCount: 1}, nil
}
return nil, nil
}
func computeQuotaForDelete(req *http.Request) (types.ResourceList, error) {
// computeResourcesForManifestDeletion returns count resource will be released when manifest deleted
// then result will be the sum of manifest count of the same repository in the project
func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
@ -53,40 +65,54 @@ func computeQuotaForDelete(req *http.Request) (types.ResourceList, error) {
return types.ResourceList{types.ResourceCount: total}, nil
}
func computeQuotaForPut(req *http.Request) (types.ResourceList, error) {
// afterManifestCreated the handler after manifest created success
// it will create or update the artifact info in db, and then attach blobs to artifact
func afterManifestCreated(w http.ResponseWriter, req *http.Request) error {
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
return nil, errors.New("manifest info missing")
return errors.New("manifest info missing")
}
artifact, err := getArtifact(info)
if err != nil {
return nil, fmt.Errorf("error occurred when to check Manifest existence %v", err)
artifact := info.Artifact()
if artifact.ID == 0 {
if _, err := dao.AddArtifact(artifact); err != nil {
return fmt.Errorf("error to add artifact, %v", err)
}
} else {
if err := dao.UpdateArtifact(artifact); err != nil {
return fmt.Errorf("error to update artifact, %v", err)
}
}
if artifact != nil {
info.ArtifactID = artifact.ID
info.DigestChanged = artifact.Digest != info.Digest
info.Exist = true
return nil, nil
}
return quota.ResourceList{quota.ResourceCount: 1}, nil
return attachBlobsToArtifact(info)
}
// get artifact by manifest info
func getArtifact(info *util.MfInfo) (*models.Artifact, error) {
query := &models.ArtifactQuery{
PID: info.ProjectID,
Repo: info.Repository,
Tag: info.Tag,
// attachBlobsToArtifact attach the blobs which from manifest to artifact
func attachBlobsToArtifact(info *util.ManifestInfo) error {
self := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: info.Digest,
}
artifacts, err := dao.ListArtifacts(query)
if err != nil || len(artifacts) == 0 {
return nil, err
artifactBlobs := append([]*models.ArtifactAndBlob{}, self)
for _, reference := range info.References {
artifactBlob := &models.ArtifactAndBlob{
DigestAF: info.Digest,
DigestBlob: reference.Digest.String(),
}
artifactBlobs = append(artifactBlobs, artifactBlob)
}
return artifacts[0], nil
if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil {
if strings.Contains(err.Error(), dao.ErrDupRows.Error()) {
log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag")
return nil
}
return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err)
}
return nil
}

View File

@ -20,8 +20,9 @@ import (
// Builder interceptor builder
type Builder interface {
// Build build interceptor from http.Request returns nil if interceptor not match the request
Build(*http.Request) Interceptor
// Build build interceptor from http.Request
// (nil, nil) must be returned if builder not match the request
Build(*http.Request) (Interceptor, error)
}
// Interceptor interceptor for middleware
@ -32,3 +33,16 @@ type Interceptor interface {
// HandleResponse won't return any error
HandleResponse(http.ResponseWriter, *http.Request)
}
// ResponseInterceptorFunc ...
type ResponseInterceptorFunc func(w http.ResponseWriter, r *http.Request)
// HandleRequest no-op HandleRequest
func (f ResponseInterceptorFunc) HandleRequest(*http.Request) error {
return nil
}
// HandleResponse calls f(w, r).
func (f ResponseInterceptorFunc) HandleResponse(w http.ResponseWriter, r *http.Request) {
f(w, r)
}

View File

@ -36,6 +36,8 @@ const (
// Options ...
type Options struct {
enforceResources *bool
Action Action
Manager *quota.Manager
MutexKeys []string
@ -48,6 +50,15 @@ type Options struct {
OnFinally func(http.ResponseWriter, *http.Request) error
}
// EnforceResources ...
func (opts *Options) EnforceResources() bool {
return opts.enforceResources != nil && *opts.enforceResources
}
func boolPtr(v bool) *bool {
return &v
}
func newOptions(opt ...Option) Options {
opts := Options{}
@ -63,9 +74,20 @@ func newOptions(opt ...Option) Options {
opts.StatusCode = http.StatusOK
}
if opts.enforceResources == nil {
opts.enforceResources = boolPtr(true)
}
return opts
}
// EnforceResources sets the interceptor enforceResources
func EnforceResources(enforceResources bool) Option {
return func(o *Options) {
o.enforceResources = boolPtr(enforceResources)
}
}
// WithAction sets the interceptor action
func WithAction(a Action) Option {
return func(o *Options) {

View File

@ -49,30 +49,19 @@ func (qi *quotaInterceptor) HandleRequest(req *http.Request) (err error) {
}
}()
opts := qi.opts
for _, key := range opts.MutexKeys {
m, err := redis.RequireLock(key)
if err != nil {
return err
}
qi.mutexes = append(qi.mutexes, m)
err = qi.requireMutexes()
if err != nil {
return
}
resources := opts.Resources
if len(resources) == 0 && opts.OnResources != nil {
resources, err = opts.OnResources(req)
if err != nil {
return fmt.Errorf("failed to compute the resources for quota, error: %v", err)
}
log.Debugf("Compute the resources for quota, got: %v", resources)
err = qi.computeResources(req)
if err != nil {
return
}
qi.resources = resources
err = qi.reserve()
if err != nil {
log.Errorf("Failed to %s resources, error: %v", opts.Action, err)
log.Errorf("Failed to %s resources, error: %v", qi.opts.Action, err)
}
return
@ -92,7 +81,9 @@ func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Requ
switch sr.Status() {
case opts.StatusCode:
if opts.OnFulfilled != nil {
opts.OnFulfilled(w, req)
if err := opts.OnFulfilled(w, req); err != nil {
log.Errorf("Failed to handle on fulfilled, error: %v", err)
}
}
default:
if err := qi.unreserve(); err != nil {
@ -100,15 +91,36 @@ func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Requ
}
if opts.OnRejected != nil {
opts.OnRejected(w, req)
if err := opts.OnRejected(w, req); err != nil {
log.Errorf("Failed to handle on rejected, error: %v", err)
}
}
}
if opts.OnFinally != nil {
opts.OnFinally(w, req)
if err := opts.OnFinally(w, req); err != nil {
log.Errorf("Failed to handle on finally, error: %v", err)
}
}
}
func (qi *quotaInterceptor) requireMutexes() error {
if !qi.opts.EnforceResources() {
// Do nothing for locks when quota interceptor not enforce resources
return nil
}
for _, key := range qi.opts.MutexKeys {
m, err := redis.RequireLock(key)
if err != nil {
return err
}
qi.mutexes = append(qi.mutexes, m)
}
return nil
}
func (qi *quotaInterceptor) freeMutexes() {
for i := len(qi.mutexes) - 1; i >= 0; i-- {
if err := redis.FreeLock(qi.mutexes[i]); err != nil {
@ -117,8 +129,29 @@ func (qi *quotaInterceptor) freeMutexes() {
}
}
func (qi *quotaInterceptor) computeResources(req *http.Request) error {
if !qi.opts.EnforceResources() {
// Do nothing in compute resources when quota interceptor not enforce resources
return nil
}
if len(qi.opts.Resources) == 0 && qi.opts.OnResources != nil {
resources, err := qi.opts.OnResources(req)
if err != nil {
return fmt.Errorf("failed to compute the resources for quota, error: %v", err)
}
qi.resources = resources
}
return nil
}
func (qi *quotaInterceptor) reserve() error {
log.Debugf("Reserve %s resources, %v", qi.opts.Action, qi.resources)
if !qi.opts.EnforceResources() {
// Do nothing in reserve resources when quota interceptor not enforce resources
return nil
}
if len(qi.resources) == 0 {
return nil
@ -135,7 +168,10 @@ func (qi *quotaInterceptor) reserve() error {
}
func (qi *quotaInterceptor) unreserve() error {
log.Debugf("Unreserve %s resources, %v", qi.opts.Action, qi.resources)
if !qi.opts.EnforceResources() {
// Do nothing in unreserve resources when quota interceptor not enforce resources
return nil
}
if len(qi.resources) == 0 {
return nil

View File

@ -0,0 +1,212 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"fmt"
"net/http"
"strconv"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
"github.com/goharbor/harbor/src/core/middlewares/util"
)
var (
defaultBuilders = []interceptor.Builder{
&blobStreamUploadBuilder{},
&blobStorageQuotaBuilder{},
&manifestCreationBuilder{},
&manifestDeletionBuilder{},
}
)
// blobStreamUploadBuilder interceptor for PATCH /v2/<name>/blobs/uploads/<uuid>
type blobStreamUploadBuilder struct{}
func (*blobStreamUploadBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if !match(req, http.MethodPatch, blobUploadURLRe) {
return nil, nil
}
s := blobUploadURLRe.FindStringSubmatch(req.URL.Path)
uuid := s[2]
onResponse := func(w http.ResponseWriter, req *http.Request) {
size, err := parseUploadedBlobSize(w)
if err != nil {
log.Errorf("failed to parse uploaded blob size for upload %s", uuid)
return
}
ok, err := setUploadedBlobSize(uuid, size)
if err != nil {
log.Errorf("failed to update blob update size for upload %s, error: %v", uuid, err)
return
}
if !ok {
// ToDo discuss what to do here.
log.Errorf("fail to set bunk: %s size: %d in redis, it causes unable to set correct quota for the artifact", uuid, size)
}
}
return interceptor.ResponseInterceptorFunc(onResponse), nil
}
// blobStorageQuotaBuilder interceptor builder for these requests
// PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
type blobStorageQuotaBuilder struct{}
func (*blobStorageQuotaBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
parseBlobInfo := getBlobInfoParser(req)
if parseBlobInfo == nil {
return nil, nil
}
info, err := parseBlobInfo(req)
if err != nil {
return nil, err
}
// replace req with blob info context
*req = *(req.WithContext(util.NewBlobInfoContext(req.Context(), info)))
opts := []quota.Option{
quota.EnforceResources(config.QuotaPerProjectEnable()),
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated), // NOTICE: mount blob and blob upload complete both return 201 when success
quota.OnResources(computeResourcesForBlob),
quota.MutexKeys(info.MutexKey()),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
return syncBlobInfoToProject(info)
}),
}
return quota.New(opts...), nil
}
// manifestCreationBuilder interceptor builder for the request PUT /v2/<name>/manifests/<reference>
type manifestCreationBuilder struct{}
func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchPushManifest(req); !match {
return nil, nil
}
info, err := util.ParseManifestInfo(req)
if err != nil {
return nil, err
}
// Replace request with manifests info context
*req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info))
opts := []quota.Option{
quota.EnforceResources(config.QuotaPerProjectEnable()),
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.AddAction),
quota.StatusCode(http.StatusCreated),
quota.OnResources(computeResourcesForManifestCreation),
quota.MutexKeys(info.MutexKey("size")),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
// manifest created, sync manifest itself as blob to blob and project_blob table
blobInfo, err := parseBlobInfoFromManifest(req)
if err != nil {
return err
}
if err := syncBlobInfoToProject(blobInfo); err != nil {
return err
}
// sync blobs from manifest which are not in project to project_blob table
blobs, err := info.GetBlobsNotInProject()
if err != nil {
return err
}
_, err = dao.AddBlobsToProject(info.ProjectID, blobs...)
return err
}),
}
return quota.New(opts...), nil
}
// deleteManifestBuilder interceptor builder for the request DELETE /v2/<name>/manifests/<reference>
type manifestDeletionBuilder struct{}
func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
if match, _, _ := util.MatchDeleteManifest(req); !match {
return nil, nil
}
info, ok := util.ManifestInfoFromContext(req.Context())
if !ok {
var err error
info, err = util.ParseManifestInfoFromPath(req)
if err != nil {
return nil, fmt.Errorf("failed to parse manifest, error %v", err)
}
// Manifest info will be used by computeResourcesForDeleteManifest
*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
}
blobs, err := dao.GetBlobsByArtifact(info.Digest)
if err != nil {
return nil, fmt.Errorf("failed to query blobs of %s, error: %v", info.Digest, err)
}
mutexKeys := []string{info.MutexKey("size")}
for _, blob := range blobs {
mutexKeys = append(mutexKeys, info.BlobMutexKey(blob))
}
opts := []quota.Option{
quota.EnforceResources(config.QuotaPerProjectEnable()),
quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
quota.WithAction(quota.SubtractAction),
quota.StatusCode(http.StatusAccepted),
quota.OnResources(computeResourcesForManifestDeletion),
quota.MutexKeys(mutexKeys...),
quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
blobs := info.ExclusiveBlobs
total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{
PID: info.ProjectID,
Digest: info.Digest,
})
if err == nil && total > 0 {
blob, err := dao.GetBlob(info.Digest)
if err == nil {
blobs = append(blobs, blob)
}
}
return dao.RemoveBlobsFromProject(info.ProjectID, blobs...)
}),
}
return quota.New(opts...), nil
}

View File

@ -15,217 +15,69 @@
package sizequota
import (
"errors"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
common_util "github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
common_redis "github.com/goharbor/harbor/src/common/utils/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"net/http"
"strings"
"time"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/interceptor"
"github.com/goharbor/harbor/src/core/middlewares/util"
)
type sizeQuotaHandler struct {
next http.Handler
builders []interceptor.Builder
next http.Handler
}
// New ...
func New(next http.Handler) http.Handler {
func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
if len(builders) == 0 {
builders = defaultBuilders
}
return &sizeQuotaHandler{
next: next,
builders: builders,
next: next,
}
}
// ServeHTTP ...
func (sqh *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
sizeInteceptor := getInteceptor(req)
if sizeInteceptor == nil {
sqh.next.ServeHTTP(rw, req)
return
}
// handler request
if err := sizeInteceptor.HandleRequest(req); err != nil {
func (h *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
interceptor, err := h.getInterceptor(req)
if err != nil {
log.Warningf("Error occurred when to handle request in size quota handler: %v", err)
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)),
http.StatusInternalServerError)
return
}
sqh.next.ServeHTTP(rw, req)
// handler response
sizeInteceptor.HandleResponse(*rw.(*util.CustomResponseWriter), req)
if interceptor == nil {
h.next.ServeHTTP(rw, req)
return
}
if err := interceptor.HandleRequest(req); err != nil {
log.Warningf("Error occurred when to handle request in size quota handler: %v", err)
http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)),
http.StatusInternalServerError)
return
}
h.next.ServeHTTP(rw, req)
interceptor.HandleResponse(rw, req)
}
func getInteceptor(req *http.Request) util.RegInterceptor {
// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
matchMountBlob, repository, mount, _ := util.MatchMountBlobURL(req)
if matchMountBlob {
bb := util.BlobInfo{}
bb.Repository = repository
bb.Digest = mount
return NewMountBlobInterceptor(&bb)
}
// PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
matchPutBlob, repository := util.MatchPutBlobURL(req)
if matchPutBlob {
bb := util.BlobInfo{}
bb.Repository = repository
return NewPutBlobInterceptor(&bb)
}
// PUT /v2/<name>/manifests/<reference>
matchPushMF, repository, tag := util.MatchPushManifest(req)
if matchPushMF {
bb := util.BlobInfo{}
mfInfo := util.MfInfo{}
bb.Repository = repository
mfInfo.Repository = repository
mfInfo.Tag = tag
return NewPutManifestInterceptor(&bb, &mfInfo)
}
// PATCH /v2/<name>/blobs/uploads/<uuid>
matchPatchBlob, _ := util.MatchPatchBlobURL(req)
if matchPatchBlob {
return NewPatchBlobInterceptor()
}
return nil
}
func requireQuota(conn redis.Conn, blobInfo *util.BlobInfo) error {
projectID, err := util.GetProjectID(strings.Split(blobInfo.Repository, "/")[0])
if err != nil {
return err
}
blobInfo.ProjectID = projectID
digestLock, err := tryLockBlob(conn, blobInfo)
if err != nil {
log.Infof("failed to lock digest in redis, %v", err)
return err
}
blobInfo.DigestLock = digestLock
blobExist, err := dao.HasBlobInProject(blobInfo.ProjectID, blobInfo.Digest)
if err != nil {
tryFreeBlob(blobInfo)
return err
}
blobInfo.Exist = blobExist
if blobExist {
return nil
}
// only require quota for non existing blob.
quotaRes := &quota.ResourceList{
quota.ResourceStorage: blobInfo.Size,
}
err = util.TryRequireQuota(blobInfo.ProjectID, quotaRes)
if err != nil {
log.Infof("project id, %d, size %d", blobInfo.ProjectID, blobInfo.Size)
tryFreeBlob(blobInfo)
log.Errorf("cannot get quota for the blob %v", err)
return err
}
blobInfo.Quota = quotaRes
return nil
}
// HandleBlobCommon handles put blob complete request
// 1, add blob into DB if success
// 2, roll back resource if failure.
func HandleBlobCommon(rw util.CustomResponseWriter, req *http.Request) error {
bbInfo := req.Context().Value(util.BBInfokKey)
bb, ok := bbInfo.(*util.BlobInfo)
if !ok {
return errors.New("failed to convert blob information context into BBInfo")
}
defer func() {
_, err := bb.DigestLock.Free()
func (h *sizeQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
for _, builder := range h.builders {
interceptor, err := builder.Build(req)
if err != nil {
log.Errorf("Error to unlock blob digest:%s in response handler, %v", bb.Digest, err)
return nil, err
}
if err := bb.DigestLock.Conn.Close(); err != nil {
log.Errorf("Error to close redis connection in put blob response handler, %v", err)
}
}()
// Do nothing for a existing blob.
if bb.Exist {
return nil
if interceptor != nil {
return interceptor, nil
}
}
if rw.Status() == http.StatusCreated {
blob := &models.Blob{
Digest: bb.Digest,
ContentType: bb.ContentType,
Size: bb.Size,
CreationTime: time.Now(),
}
_, err := dao.AddBlob(blob)
if err != nil {
return err
}
} else if rw.Status() >= 300 && rw.Status() <= 511 {
success := util.TryFreeQuota(bb.ProjectID, bb.Quota)
if !success {
return fmt.Errorf("Error to release resource booked for the blob, %d, digest: %s ", bb.ProjectID, bb.Digest)
}
}
return nil
}
// tryLockBlob acquires a redis mutex for the blob digest, scoped to the
// project, so concurrent uploads of the same blob do not double-book quota.
func tryLockBlob(conn redis.Conn, blobInfo *util.BlobInfo) (*common_redis.Mutex, error) {
    // Key layout: Quota::blob-lock::<project name>:<digest>
    projectName := strings.Split(blobInfo.Repository, "/")[0]
    key := "Quota::blob-lock::" + projectName + ":" + blobInfo.Digest
    lock := common_redis.New(conn, key, common_util.GenerateRandomString())
    acquired, err := lock.Require()
    if err != nil {
        return nil, err
    }
    if !acquired {
        return nil, fmt.Errorf("unable to lock digest: %s, %s ", blobInfo.Repository, blobInfo.Digest)
    }
    return lock, nil
}
// tryFreeBlob releases the digest lock held in blobInfo, logging (not
// propagating) any failure.
func tryFreeBlob(blobInfo *util.BlobInfo) {
    if _, err := blobInfo.DigestLock.Free(); err != nil {
        log.Warningf("Error to unlock digest: %s,%s with error: %v ", blobInfo.Repository, blobInfo.Digest, err)
    }
}
// rmBlobUploadUUID deletes the upload-size bookkeeping key for UUID from
// redis. A missing key is treated as success (nothing to remove).
func rmBlobUploadUUID(conn redis.Conn, UUID string) (bool, error) {
    exists, err := redis.Int(conn.Do("EXISTS", UUID))
    if err != nil {
        return false, err
    }
    if exists != 1 {
        return true, nil
    }
    deleted, err := redis.Int(conn.Do("DEL", UUID))
    if err != nil {
        return false, err
    }
    return deleted == 1, nil
}
// put blob path: /v2/<name>/blobs/uploads/<uuid>
// getUUID returns the trailing path segment (the upload UUID) of a blob
// upload path, or "" when the string contains no "/" at all.
// Fix: removed a stray, unreachable `return nil, nil` left behind by a
// merge — it did not type-check for a `string` return.
func getUUID(path string) string {
    if !strings.Contains(path, "/") {
        log.Infof("it's not a valid path string: %s", path)
        return ""
    }
    strs := strings.Split(path, "/")
    return strs[len(strs)-1]
}

View File

@ -15,163 +15,696 @@
package sizequota
import (
"context"
"bytes"
"encoding/json"
"fmt"
"github.com/garyburd/redigo/redis"
utilstest "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"strconv"
"sync"
"testing"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/middlewares/countquota"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/suite"
)
// testingRedisHost names the env var that points tests at a redis server.
const testingRedisHost = "REDIS_HOST"

func init() {
    // Seed math/rand so randomly generated fixtures differ between runs.
    rand.Seed(time.Now().UnixNano())
}
// genUUID produces a random UUID-shaped string (8-4-4-4-12 uppercase hex
// groups). It returns "" in the unlikely event the random source fails.
func genUUID() string {
    buf := make([]byte, 16)
    if _, err := rand.Read(buf); err != nil {
        return ""
    }
    return fmt.Sprintf("%X-%X-%X-%X-%X", buf[0:4], buf[4:6], buf[6:8], buf[8:10], buf[10:])
}
// getProjectCountUsage reads the quota-usage row for the given project
// and returns its artifact-count component.
func getProjectCountUsage(projectID int64) (int64, error) {
    usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)}
    // Look the row up by its (reference, reference_id) pair.
    err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
    if err != nil {
        return 0, err
    }
    used, err := types.NewResourceList(usage.Used)
    if err != nil {
        return 0, err
    }
    return used[types.ResourceCount], nil
}

// getProjectStorageUsage reads the quota-usage row for the given project
// and returns its storage-bytes component.
func getProjectStorageUsage(projectID int64) (int64, error) {
    usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)}
    err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
    if err != nil {
        return 0, err
    }
    used, err := types.NewResourceList(usage.Used)
    if err != nil {
        return 0, err
    }
    return used[types.ResourceStorage], nil
}
// randomString returns n random lowercase ASCII letters.
func randomString(n int) string {
    const alphabet = "abcdefghijklmnopqrstuvwxyz"
    out := make([]byte, n)
    for i := 0; i < n; i++ {
        out[i] = alphabet[rand.Intn(len(alphabet))]
    }
    return string(out)
}
// makeManifest builds a schema2 manifest fixture with a random config
// digest of configSize bytes and one random-digest layer per entry in
// layerSizes.
func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest {
    manifest := schema2.Manifest{
        Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest},
        Config: distribution.Descriptor{
            MediaType: schema2.MediaTypeImageConfig,
            Size:      configSize,
            Digest:    digest.FromString(randomString(15)),
        },
    }
    for _, size := range layerSizes {
        manifest.Layers = append(manifest.Layers, distribution.Descriptor{
            MediaType: schema2.MediaTypeLayer,
            Size:      size,
            Digest:    digest.FromString(randomString(15)),
        })
    }
    return manifest
}

// manifestWithAdditionalLayers copies raw and appends one extra
// random-digest layer per entry in layerSizes.
func manifestWithAdditionalLayers(raw schema2.Manifest, layerSizes []int64) schema2.Manifest {
    var manifest schema2.Manifest
    manifest.Versioned = raw.Versioned
    manifest.Config = raw.Config
    manifest.Layers = append(manifest.Layers, raw.Layers...)
    for _, size := range layerSizes {
        manifest.Layers = append(manifest.Layers, distribution.Descriptor{
            MediaType: schema2.MediaTypeLayer,
            Size:      size,
            Digest:    digest.FromString(randomString(15)),
        })
    }
    return manifest
}

// digestOfManifest returns the sha256 digest string of the JSON-encoded
// manifest.
func digestOfManifest(manifest schema2.Manifest) string {
    bytes, _ := json.Marshal(manifest)
    return digest.FromBytes(bytes).String()
}

// sizeOfManifest returns the byte length of the JSON-encoded manifest.
func sizeOfManifest(manifest schema2.Manifest) int64 {
    bytes, _ := json.Marshal(manifest)
    return int64(len(bytes))
}

// sizeOfImage returns the full storage footprint of an image: manifest
// bytes plus config plus all layer sizes.
func sizeOfImage(manifest schema2.Manifest) int64 {
    totalSizeOfLayers := manifest.Config.Size
    for _, layer := range manifest.Layers {
        totalSizeOfLayers += layer.Size
    }
    return sizeOfManifest(manifest) + totalSizeOfLayers
}
// doHandle runs req through the sizequota middleware into next (or a
// default 201 handler) and returns the recorded status code.
func doHandle(req *http.Request, next ...http.HandlerFunc) int {
    rr := httptest.NewRecorder()
    var n http.HandlerFunc
    if len(next) > 0 {
        n = next[0]
    } else {
        n = func(w http.ResponseWriter, req *http.Request) {
            w.WriteHeader(http.StatusCreated)
        }
    }
    h := New(http.HandlerFunc(n))
    h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
    return rr.Code
}

// patchBlobUpload simulates one PATCH chunk upload of chunkSize bytes.
func patchBlobUpload(projectName, name, uuid, blobDigest string, chunkSize int64) {
    repository := fmt.Sprintf("%s/%s", projectName, name)
    url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest)
    req, _ := http.NewRequest(http.MethodPatch, url, nil)
    doHandle(req, func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusAccepted)
        // Range end offset is inclusive, hence chunkSize-1; the header map
        // is still read live by the interceptor after WriteHeader here.
        w.Header().Add("Range", fmt.Sprintf("0-%d", chunkSize-1))
    })
}

// putBlobUpload simulates completing a blob upload; an optional blobSize
// is sent as Content-Length (omitted to exercise the redis-tracked size).
func putBlobUpload(projectName, name, uuid, blobDigest string, blobSize ...int64) {
    repository := fmt.Sprintf("%s/%s", projectName, name)
    url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest)
    req, _ := http.NewRequest(http.MethodPut, url, nil)
    if len(blobSize) > 0 {
        req.Header.Add("Content-Length", strconv.FormatInt(blobSize[0], 10))
    }
    doHandle(req, func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusCreated)
    })
}

// mountBlob simulates a cross-repository blob mount request.
func mountBlob(projectName, name, blobDigest, fromRepository string) {
    repository := fmt.Sprintf("%s/%s", projectName, name)
    url := fmt.Sprintf("/v2/%s/blobs/uploads/?mount=%s&from=%s", repository, blobDigest, fromRepository)
    req, _ := http.NewRequest(http.MethodPost, url, nil)
    doHandle(req, func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusCreated)
    })
}

// deleteManifest simulates a manifest delete through both the sizequota
// and countquota middlewares; the optional accepted callback decides
// whether the backend reports 202 or 404 (used by race tests).
func deleteManifest(projectName, name, digest string, accepted ...func() bool) {
    repository := fmt.Sprintf("%s/%s", projectName, name)
    url := fmt.Sprintf("/v2/%s/manifests/%s", repository, digest)
    req, _ := http.NewRequest(http.MethodDelete, url, nil)
    next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        if len(accepted) > 0 {
            if accepted[0]() {
                w.WriteHeader(http.StatusAccepted)
            } else {
                w.WriteHeader(http.StatusNotFound)
            }
            return
        }
        w.WriteHeader(http.StatusAccepted)
    }))
    rr := httptest.NewRecorder()
    h := New(next)
    h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
}

// putManifest simulates pushing a manifest through both middlewares.
func putManifest(projectName, name, tag string, manifest schema2.Manifest) {
    repository := fmt.Sprintf("%s/%s", projectName, name)
    buf, _ := json.Marshal(manifest)
    url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag)
    req, _ := http.NewRequest(http.MethodPut, url, bytes.NewReader(buf))
    req.Header.Add("Content-Type", manifest.MediaType)
    next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusCreated)
    }))
    rr := httptest.NewRecorder()
    h := New(next)
    h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
}

// pushImage uploads the config blob, every layer, then the manifest —
// the full push sequence for one image.
func pushImage(projectName, name, tag string, manifest schema2.Manifest) {
    putBlobUpload(projectName, name, genUUID(), manifest.Config.Digest.String(), manifest.Config.Size)
    for _, layer := range manifest.Layers {
        putBlobUpload(projectName, name, genUUID(), layer.Digest.String(), layer.Size)
    }
    putManifest(projectName, name, tag, manifest)
}
// withProject creates a throwaway project with a random name, runs f
// with its id and name, and deletes it afterwards.
func withProject(f func(int64, string)) {
    projectName := randomString(5)
    projectID, err := dao.AddProject(models.Project{
        Name:    projectName,
        OwnerID: 1,
    })
    if err != nil {
        panic(err)
    }
    defer func() {
        dao.DeleteProject(projectID)
    }()
    f(projectID, projectName)
}

// HandlerSuite groups the sizequota middleware integration tests.
type HandlerSuite struct {
    suite.Suite
}

// checkCountUsage asserts the project's artifact-count usage equals expected.
func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) {
    count, err := getProjectCountUsage(projectID)
    suite.Nil(err, fmt.Sprintf("Failed to get count usage of project %d, error: %v", projectID, err))
    suite.Equal(expected, count, "Failed to check count usage for project %d", projectID)
}

// checkStorageUsage asserts the project's storage usage equals expected.
func (suite *HandlerSuite) checkStorageUsage(expected, projectID int64) {
    value, err := getProjectStorageUsage(projectID)
    suite.Nil(err, fmt.Sprintf("Failed to get storage usage of project %d, error: %v", projectID, err))
    suite.Equal(expected, value, "Failed to check storage usage for project %d", projectID)
}

// TearDownTest truncates every quota-related table between tests.
func (suite *HandlerSuite) TearDownTest() {
    for _, table := range []string{
        "artifact", "blob",
        "artifact_blob", "project_blob",
        "quota", "quota_usage",
    } {
        dao.ClearTable(table)
    }
}
// TestPatchBlobUpload verifies a PATCH chunk upload records its size in redis.
func (suite *HandlerSuite) TestPatchBlobUpload() {
    withProject(func(projectID int64, projectName string) {
        uuid := genUUID()
        blobDigest := digest.FromString(randomString(15)).String()
        patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
        size, err := getUploadedBlobSize(uuid)
        suite.Nil(err)
        suite.Equal(int64(1024), size)
    })
}

// TestPutBlobUpload verifies a completed upload books storage and stores the blob.
func (suite *HandlerSuite) TestPutBlobUpload() {
    withProject(func(projectID int64, projectName string) {
        uuid := genUUID()
        blobDigest := digest.FromString(randomString(15)).String()
        putBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
        suite.checkStorageUsage(1024, projectID)
        blob, err := dao.GetBlob(blobDigest)
        suite.Nil(err)
        suite.Equal(int64(1024), blob.Size)
    })
}

// TestPutBlobUploadWithPatch verifies the size tracked by PATCH is used
// when the final PUT carries no Content-Length.
func (suite *HandlerSuite) TestPutBlobUploadWithPatch() {
    withProject(func(projectID int64, projectName string) {
        uuid := genUUID()
        blobDigest := digest.FromString(randomString(15)).String()
        patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
        putBlobUpload(projectName, "photon", uuid, blobDigest)
        suite.checkStorageUsage(1024, projectID)
        blob, err := dao.GetBlob(blobDigest)
        suite.Nil(err)
        suite.Equal(int64(1024), blob.Size)
    })
}

// TestMountBlob verifies mounting a blob into another project books the
// blob's size against the target project's quota.
func (suite *HandlerSuite) TestMountBlob() {
    withProject(func(projectID int64, projectName string) {
        blobDigest := digest.FromString(randomString(15)).String()
        putBlobUpload(projectName, "photon", genUUID(), blobDigest, 1024)
        suite.checkStorageUsage(1024, projectID)
        repository := fmt.Sprintf("%s/%s", projectName, "photon")
        withProject(func(projectID int64, projectName string) {
            mountBlob(projectName, "harbor", blobDigest, repository)
            suite.checkStorageUsage(1024, projectID)
        })
    })
}

// TestPutManifestCreated verifies a manifest push adds the manifest size
// on top of the already-uploaded blob sizes.
func (suite *HandlerSuite) TestPutManifestCreated() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(100, []int64{100, 100})
        putBlobUpload(projectName, "photon", genUUID(), manifest.Config.Digest.String(), manifest.Config.Size)
        for _, layer := range manifest.Layers {
            putBlobUpload(projectName, "photon", genUUID(), layer.Digest.String(), layer.Size)
        }
        putManifest(projectName, "photon", "latest", manifest)
        suite.checkStorageUsage(int64(300+sizeOfManifest(manifest)), projectID)
    })
}

// TestDeleteManifest verifies deleting an image returns storage to zero.
func (suite *HandlerSuite) TestDeleteManifest() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkStorageUsage(size, projectID)
        deleteManifest(projectName, "photon", digestOfManifest(manifest))
        suite.checkStorageUsage(0, projectID)
    })
}
// TestImageOverwrite pushes three distinct manifests to the same tag and
// checks the artifact count stays 1 while storage accumulates (the random
// manifests share no layers).
func (suite *HandlerSuite) TestImageOverwrite() {
    withProject(func(projectID int64, projectName string) {
        manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
        size1 := sizeOfImage(manifest1)
        pushImage(projectName, "photon", "latest", manifest1)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size1, projectID)
        manifest2 := makeManifest(1, []int64{2, 3, 4, 5})
        size2 := sizeOfImage(manifest2)
        pushImage(projectName, "photon", "latest", manifest2)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size1+size2, projectID)
        manifest3 := makeManifest(1, []int64{2, 3, 4, 5})
        // Fix: size3 was computed from manifest2 (copy-paste slip).
        size3 := sizeOfImage(manifest3)
        pushImage(projectName, "photon", "latest", manifest3)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size1+size2+size3, projectID)
    })
}
// TestPushImageMultiTimes verifies re-pushing the identical image is idempotent.
func (suite *HandlerSuite) TestPushImageMultiTimes() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size, projectID)
    })
}

// TestPushImageToSameRepository verifies a second tag of the same image
// in the same repository adds a count but no storage.
func (suite *HandlerSuite) TestPushImageToSameRepository() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "photon", "dev", manifest)
        suite.checkCountUsage(2, projectID)
        suite.checkStorageUsage(size, projectID)
    })
}

// TestPushImageToDifferentRepositories verifies layers are shared across
// repositories in a project; only the manifest bytes are re-counted.
func (suite *HandlerSuite) TestPushImageToDifferentRepositories() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "mysql", "latest", manifest)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "redis", "latest", manifest)
        suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
        pushImage(projectName, "postgres", "latest", manifest)
        suite.checkStorageUsage(size+2*sizeOfManifest(manifest), projectID)
    })
}

// TestPushImageToDifferentProjects verifies quota accounting is isolated
// per project.
func (suite *HandlerSuite) TestPushImageToDifferentProjects() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "mysql", "latest", manifest)
        suite.checkStorageUsage(size, projectID)
        withProject(func(id int64, name string) {
            manifest := makeManifest(1, []int64{2, 3, 4, 5})
            size := sizeOfImage(manifest)
            pushImage(name, "mysql", "latest", manifest)
            suite.checkStorageUsage(size, id)
            // The original project's usage must be untouched.
            suite.checkStorageUsage(size, projectID)
        })
    })
}

// TestDeleteManifestShareLayersInSameRepository verifies deleting one tag
// only releases the manifest bytes when its layers are still referenced.
func (suite *HandlerSuite) TestDeleteManifestShareLayersInSameRepository() {
    withProject(func(projectID int64, projectName string) {
        manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
        size1 := sizeOfImage(manifest1)
        pushImage(projectName, "mysql", "latest", manifest1)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size1, projectID)
        manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7})
        pushImage(projectName, "mysql", "dev", manifest2)
        suite.checkCountUsage(2, projectID)
        totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7
        suite.checkStorageUsage(totalSize, projectID)
        deleteManifest(projectName, "mysql", digestOfManifest(manifest1))
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID)
    })
}

// TestDeleteManifestShareLayersInDifferentRepositories is the cross-repo
// variant of the shared-layers deletion check.
func (suite *HandlerSuite) TestDeleteManifestShareLayersInDifferentRepositories() {
    withProject(func(projectID int64, projectName string) {
        manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
        size1 := sizeOfImage(manifest1)
        pushImage(projectName, "mysql", "latest", manifest1)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size1, projectID)
        pushImage(projectName, "mysql", "dev", manifest1)
        suite.checkCountUsage(2, projectID)
        suite.checkStorageUsage(size1, projectID)
        manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7})
        pushImage(projectName, "mariadb", "latest", manifest2)
        suite.checkCountUsage(3, projectID)
        totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7
        suite.checkStorageUsage(totalSize, projectID)
        deleteManifest(projectName, "mysql", digestOfManifest(manifest1))
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID)
    })
}

// TestDeleteManifestInSameRepository verifies deleting by digest removes
// every tag of that manifest and all its storage.
func (suite *HandlerSuite) TestDeleteManifestInSameRepository() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "photon", "dev", manifest)
        suite.checkCountUsage(2, projectID)
        suite.checkStorageUsage(size, projectID)
        deleteManifest(projectName, "photon", digestOfManifest(manifest))
        suite.checkCountUsage(0, projectID)
        suite.checkStorageUsage(0, projectID)
    })
}

// TestDeleteManifestInDifferentRepositories verifies deletion in one repo
// leaves other repos' copies (and re-push) accounted correctly.
func (suite *HandlerSuite) TestDeleteManifestInDifferentRepositories() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "mysql", "latest", manifest)
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "mysql", "5.6", manifest)
        suite.checkCountUsage(2, projectID)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "redis", "latest", manifest)
        suite.checkCountUsage(3, projectID)
        suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
        deleteManifest(projectName, "redis", digestOfManifest(manifest))
        suite.checkCountUsage(2, projectID)
        suite.checkStorageUsage(size, projectID)
        pushImage(projectName, "redis", "latest", manifest)
        suite.checkCountUsage(3, projectID)
        suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
    })
}

// TestDeleteManifestInDifferentProjects verifies deletion in one project
// does not affect another project holding the same image.
func (suite *HandlerSuite) TestDeleteManifestInDifferentProjects() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "mysql", "latest", manifest)
        suite.checkStorageUsage(size, projectID)
        withProject(func(id int64, name string) {
            pushImage(name, "mysql", "latest", manifest)
            suite.checkStorageUsage(size, id)
            suite.checkStorageUsage(size, projectID)
            deleteManifest(projectName, "mysql", digestOfManifest(manifest))
            suite.checkCountUsage(0, projectID)
            suite.checkStorageUsage(0, projectID)
        })
    })
}
// TestPushDeletePush verifies usage after push → delete → push round-trips.
func (suite *HandlerSuite) TestPushDeletePush() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkStorageUsage(size, projectID)
        deleteManifest(projectName, "photon", digestOfManifest(manifest))
        suite.checkStorageUsage(0, projectID)
        pushImage(projectName, "photon", "latest", manifest)
        suite.checkStorageUsage(size, projectID)
    })
}

// TestPushImageRace verifies concurrent pushes of one image book its
// quota exactly once.
func (suite *HandlerSuite) TestPushImageRace() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        size := sizeOfImage(manifest)
        var wg sync.WaitGroup
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                pushImage(projectName, "photon", "latest", manifest)
            }()
        }
        wg.Wait()
        suite.checkCountUsage(1, projectID)
        suite.checkStorageUsage(size, projectID)
    })
}

// TestDeleteImageRace verifies concurrent deletes of one image release
// its quota exactly once (only one goroutine's backend reports 202).
func (suite *HandlerSuite) TestDeleteImageRace() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        pushImage(projectName, "photon", "latest", manifest)
        count := 100
        size := sizeOfImage(manifest)
        for i := 0; i < count; i++ {
            manifest := makeManifest(1, []int64{2, 3, 4, 5})
            pushImage(projectName, "mysql", fmt.Sprintf("tag%d", i), manifest)
            size += sizeOfImage(manifest)
        }
        suite.checkCountUsage(int64(count+1), projectID)
        suite.checkStorageUsage(size, projectID)
        var wg sync.WaitGroup
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                deleteManifest(projectName, "photon", digestOfManifest(manifest), func() bool {
                    // Only the first goroutine "succeeds" at the backend.
                    return i == 0
                })
            }(i)
        }
        wg.Wait()
        suite.checkCountUsage(int64(count), projectID)
        suite.checkStorageUsage(size-sizeOfImage(manifest), projectID)
    })
}

// TestDisableProjectQuota verifies no quota row is created while the
// per-project quota setting is disabled.
func (suite *HandlerSuite) TestDisableProjectQuota() {
    withProject(func(projectID int64, projectName string) {
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        pushImage(projectName, "photon", "latest", manifest)
        quotas, err := dao.ListQuotas(&models.QuotaQuery{
            Reference:   "project",
            ReferenceID: strconv.FormatInt(projectID, 10),
        })
        suite.Nil(err)
        suite.Len(quotas, 1)
    })
    withProject(func(projectID int64, projectName string) {
        cfg := config.GetCfgManager()
        cfg.Set(common.QuotaPerProjectEnable, false)
        defer cfg.Set(common.QuotaPerProjectEnable, true)
        manifest := makeManifest(1, []int64{2, 3, 4, 5})
        pushImage(projectName, "photon", "latest", manifest)
        quotas, err := dao.ListQuotas(&models.QuotaQuery{
            Reference:   "project",
            ReferenceID: strconv.FormatInt(projectID, 10),
        })
        suite.Nil(err)
        suite.Len(quotas, 0)
    })
}
func TestMain(m *testing.M) {
utilstest.InitDatabaseFromEnv()
rc := m.Run()
if rc != 0 {
os.Exit(rc)
config.Init()
dao.PrepareTestForPostgresSQL()
if result := m.Run(); result != 0 {
os.Exit(result)
}
}
// TestGetInteceptor checks the request-to-interceptor dispatch for a
// manifest push and for an unmatched URL.
func TestGetInteceptor(t *testing.T) {
    assert := assert.New(t)
    req1, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
    res1 := getInteceptor(req1)
    _, ok := res1.(*PutManifestInterceptor)
    assert.True(ok)
    req2, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/TestGetInteceptor/14.04", nil)
    res2 := getInteceptor(req2)
    assert.Nil(res2)
}

// TestRequireQuota checks quota booking succeeds for a fresh blob against
// a live redis (host from $REDIS_HOST) and test database.
func TestRequireQuota(t *testing.T) {
    con, err := redis.Dial(
        "tcp",
        fmt.Sprintf("%s:%d", getRedisHost(), 6379),
        redis.DialConnectTimeout(30*time.Second),
        redis.DialReadTimeout(time.Minute+10*time.Second),
        redis.DialWriteTimeout(10*time.Second),
    )
    assert.Nil(t, err)
    defer con.Close()
    assert := assert.New(t)
    blobInfo := &util.BlobInfo{
        Repository: "library/test",
        Digest:     "sha256:abcdf123sdfefeg1246",
    }
    err = requireQuota(con, blobInfo)
    assert.Nil(err)
}

// TestGetUUID checks UUID extraction from a blob-upload path and the
// empty result for a string with no "/".
func TestGetUUID(t *testing.T) {
    str1 := "test/1/2/uuid-1"
    uuid1 := getUUID(str1)
    assert.Equal(t, uuid1, "uuid-1")
    // not a valid path, just return empty
    str2 := "test-1-2-uuid-2"
    uuid2 := getUUID(str2)
    assert.Equal(t, uuid2, "")
}

// TestAddRmUUID checks rmBlobUploadUUID on both a missing key (treated
// as success) and a present key.
func TestAddRmUUID(t *testing.T) {
    con, err := redis.Dial(
        "tcp",
        fmt.Sprintf("%s:%d", getRedisHost(), 6379),
        redis.DialConnectTimeout(30*time.Second),
        redis.DialReadTimeout(time.Minute+10*time.Second),
        redis.DialWriteTimeout(10*time.Second),
    )
    assert.Nil(t, err)
    defer con.Close()
    rmfail, err := rmBlobUploadUUID(con, "test-rm-uuid")
    assert.Nil(t, err)
    assert.True(t, rmfail)
    success, err := util.SetBunkSize(con, "test-rm-uuid", 1000)
    assert.Nil(t, err)
    assert.True(t, success)
    rmSuccess, err := rmBlobUploadUUID(con, "test-rm-uuid")
    assert.Nil(t, err)
    assert.True(t, rmSuccess)
}
// TestTryFreeLockBlob checks a blob digest lock can be acquired and freed.
func TestTryFreeLockBlob(t *testing.T) {
    con, err := redis.Dial(
        "tcp",
        fmt.Sprintf("%s:%d", getRedisHost(), 6379),
        redis.DialConnectTimeout(30*time.Second),
        redis.DialReadTimeout(time.Minute+10*time.Second),
        redis.DialWriteTimeout(10*time.Second),
    )
    assert.Nil(t, err)
    defer con.Close()
    blobInfo := util.BlobInfo{
        Repository: "lock/test",
        Digest:     "sha256:abcdf123sdfefeg1246",
    }
    lock, err := tryLockBlob(con, &blobInfo)
    assert.Nil(t, err)
    blobInfo.DigestLock = lock
    tryFreeBlob(&blobInfo)
}

// TestBlobCommon checks HandleBlobCommon persists the blob record when
// the wrapped response reports 201 Created.
func TestBlobCommon(t *testing.T) {
    con, err := redis.Dial(
        "tcp",
        fmt.Sprintf("%s:%d", getRedisHost(), 6379),
        redis.DialConnectTimeout(30*time.Second),
        redis.DialReadTimeout(time.Minute+10*time.Second),
        redis.DialWriteTimeout(10*time.Second),
    )
    assert.Nil(t, err)
    defer con.Close()
    req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
    blobInfo := util.BlobInfo{
        Repository:  "TestBlobCommon/test",
        Digest:      "sha256:abcdf12345678sdfefeg1246",
        ContentType: "ContentType",
        Size:        101,
        Exist:       false,
    }
    rw := httptest.NewRecorder()
    customResW := util.CustomResponseWriter{ResponseWriter: rw}
    customResW.WriteHeader(201)
    lock, err := tryLockBlob(con, &blobInfo)
    assert.Nil(t, err)
    blobInfo.DigestLock = lock
    // Stash blob info in the request context, as the middleware would.
    *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
    err = HandleBlobCommon(customResW, req)
    assert.Nil(t, err)
}
func getRedisHost() string {
redisHost := os.Getenv(testingRedisHost)
if redisHost == "" {
redisHost = "127.0.0.1" // for local test
}
return redisHost
func TestRunHandlerSuite(t *testing.T) {
suite.Run(t, new(HandlerSuite))
}

View File

@ -1,69 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"net/http"
"strings"
)
// MountBlobInterceptor handles the cross-repository blob mount request:
// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository>
type MountBlobInterceptor struct {
    blobInfo *util.BlobInfo
}

// NewMountBlobInterceptor returns an interceptor bound to the given blob info.
func NewMountBlobInterceptor(blobInfo *util.BlobInfo) *MountBlobInterceptor {
    return &MountBlobInterceptor{
        blobInfo: blobInfo,
    }
}
// HandleRequest validates the blob referenced by a mount request and
// books storage quota for it in the target project before proxying.
func (mbi *MountBlobInterceptor) HandleRequest(req *http.Request) error {
    targetProject := strings.Split(mbi.blobInfo.Repository, "/")[0]
    tProjectID, err := util.GetProjectID(targetProject)
    if err != nil {
        return fmt.Errorf("error occurred when to get target project: %d, %v", tProjectID, err)
    }
    blob, err := dao.GetBlob(mbi.blobInfo.Digest)
    if err != nil {
        return err
    }
    if blob == nil {
        return fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", mbi.blobInfo.Digest)
    }
    // A mount request carries no size; take it from the stored blob row.
    mbi.blobInfo.Size = blob.Size
    con, err := util.GetRegRedisCon()
    if err != nil {
        return err
    }
    if err := requireQuota(con, mbi.blobInfo); err != nil {
        return err
    }
    // Stash the blob info so HandleResponse can commit or roll back.
    *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, mbi.blobInfo)))
    return nil
}
// HandleResponse finalizes the mount via the shared blob handling:
// commit the blob record on success or free the booked quota on failure.
func (mbi *MountBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
    err := HandleBlobCommon(rw, req)
    if err != nil {
        log.Error(err)
    }
}

View File

@ -1,85 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestNewMountBlobInterceptor checks the constructor returns a non-nil value.
func TestNewMountBlobInterceptor(t *testing.T) {
    blobinfo := &util.BlobInfo{}
    blobinfo.Repository = "TestNewMountBlobInterceptor/latest"
    bi := NewMountBlobInterceptor(blobinfo)
    assert.NotNil(t, bi)
}

// TestMountBlobHandleRequest expects an error because the referenced
// project/blob does not exist in the test database.
func TestMountBlobHandleRequest(t *testing.T) {
    blobInfo := util.BlobInfo{
        Repository:  "TestHandleRequest/test",
        Digest:      "sha256:TestHandleRequest1234",
        ContentType: "ContentType",
        Size:        101,
        Exist:       false,
    }
    req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
    bi := NewMountBlobInterceptor(&blobInfo)
    assert.NotNil(t, bi.HandleRequest(req))
}
// TestMountBlobHandleResponse drives HandleResponse with a locked blob
// and a recorded 201 response; it only checks the path runs, errors are
// logged inside HandleResponse rather than returned.
func TestMountBlobHandleResponse(t *testing.T) {
    con, err := redis.Dial(
        "tcp",
        fmt.Sprintf("%s:%d", getRedisHost(), 6379),
        redis.DialConnectTimeout(30*time.Second),
        redis.DialReadTimeout(time.Minute+10*time.Second),
        redis.DialWriteTimeout(10*time.Second),
    )
    assert.Nil(t, err)
    defer con.Close()
    req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
    blobInfo := util.BlobInfo{
        Repository:  "TestHandleResponse/test",
        Digest:      "sha256:TestHandleResponseabcdf12345678sdfefeg1246",
        ContentType: "ContentType",
        Size:        101,
        Exist:       false,
    }
    rw := httptest.NewRecorder()
    customResW := util.CustomResponseWriter{ResponseWriter: rw}
    customResW.WriteHeader(201)
    lock, err := tryLockBlob(con, &blobInfo)
    assert.Nil(t, err)
    blobInfo.DigestLock = lock
    // Stash blob info in the request context, as the middleware would.
    *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
    bi := NewMountBlobInterceptor(&blobInfo)
    assert.NotNil(t, bi)
    bi.HandleResponse(customResW, req)
}

View File

@ -1,86 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"net/http"
"strconv"
"strings"
)
// PatchBlobInterceptor handles PATCH /v2/<name>/blobs/uploads/<uuid>
// (chunked blob upload) requests.
type PatchBlobInterceptor struct {
}

// NewPatchBlobInterceptor returns a stateless patch-blob interceptor.
func NewPatchBlobInterceptor() *PatchBlobInterceptor {
    return &PatchBlobInterceptor{}
}

// HandleRequest do nothing for patch blob, just let the request to proxy.
func (pbi *PatchBlobInterceptor) HandleRequest(req *http.Request) error {
    return nil
}
// HandleResponse records the upload progress from the Range attribute,
// setting it into redis with the upload UUID as the key.
// Fixes vs original: the second guard re-tested uuid == "" instead of
// patchRange == "" (so the logged condition was never reached); the
// ParseInt error was checked only AFTER its result had been used; and a
// malformed Range without "-" would have panicked on the [1] index.
func (pbi *PatchBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
    if rw.Status() != http.StatusAccepted {
        return
    }
    con, err := util.GetRegRedisCon()
    if err != nil {
        log.Error(err)
        return
    }
    defer con.Close()
    uuid := rw.Header().Get("Docker-Upload-UUID")
    if uuid == "" {
        log.Errorf("no UUID in the patch blob response, the request path %s ", req.URL.Path)
        return
    }
    // Range: Range indicating the current progress of the upload.
    // https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload
    patchRange := rw.Header().Get("Range")
    if patchRange == "" {
        log.Errorf("no Range in the patch blob response, the request path %s ", req.URL.Path)
        return
    }
    parts := strings.Split(patchRange, "-")
    if len(parts) != 2 {
        log.Errorf("invalid Range %q in the patch blob response, the request path %s ", patchRange, req.URL.Path)
        return
    }
    size, err := strconv.ParseInt(parts[1], 10, 64)
    if err != nil {
        log.Error(err)
        return
    }
    // docker registry did '-1' in the response (inclusive end offset).
    if size > 0 {
        size = size + 1
    }
    success, err := util.SetBunkSize(con, uuid, size)
    if err != nil {
        log.Error(err)
        return
    }
    if !success {
        // ToDo discuss what to do here.
        log.Warningf(" T_T: Fail to set bunk: %s size: %d in redis, it causes unable to set correct quota for the artifact.", uuid, size)
    }
}

View File

@ -1,42 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
)
// TestNewPatchBlobInterceptor verifies the constructor returns a non-nil interceptor.
func TestNewPatchBlobInterceptor(t *testing.T) {
	bi := NewPatchBlobInterceptor()
	assert.NotNil(t, bi)
}
// TestPatchBlobHandleRequest verifies HandleRequest is a no-op (always nil).
func TestPatchBlobHandleRequest(t *testing.T) {
	req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
	bi := NewPatchBlobInterceptor()
	assert.Nil(t, bi.HandleRequest(req))
}
// TestPatchBlobHandleResponse checks that a non-202 status makes
// HandleResponse return early without touching redis.
func TestPatchBlobHandleResponse(t *testing.T) {
	req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
	rw := httptest.NewRecorder()
	customResW := util.CustomResponseWriter{ResponseWriter: rw}
	customResW.WriteHeader(400)
	NewPatchBlobInterceptor().HandleResponse(customResW, req)
}

View File

@ -1,83 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"errors"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/opencontainers/go-digest"
"net/http"
)
// PutBlobInterceptor ...
type PutBlobInterceptor struct {
blobInfo *util.BlobInfo
}
// NewPutBlobInterceptor returns an interceptor that handles the blob upload
// complete request (PUT /v2/<name>/blobs/uploads/<uuid>) for the given blob.
func NewPutBlobInterceptor(blobInfo *util.BlobInfo) *PutBlobInterceptor {
	return &PutBlobInterceptor{
		blobInfo: blobInfo,
	}
}
// HandleRequest resolves the digest, upload UUID and size of the completed
// blob upload and reserves quota for it before the request is proxied to the
// registry. Returns an error (rejecting the push) when the digest is missing
// or invalid, or when quota cannot be acquired.
func (pbi *PutBlobInterceptor) HandleRequest(req *http.Request) error {
	// the redis connection will be closed in the put response.
	con, err := util.GetRegRedisCon()
	if err != nil {
		return err
	}
	// Best-effort cleanup of the per-upload UUID bookkeeping; runs on every
	// exit path once blobInfo.UUID has been populated.
	defer func() {
		if pbi.blobInfo.UUID != "" {
			_, err := rmBlobUploadUUID(con, pbi.blobInfo.UUID)
			if err != nil {
				log.Warningf("error occurred when remove UUID for blob, %v", err)
			}
		}
	}()
	dgstStr := req.FormValue("digest")
	if dgstStr == "" {
		return errors.New("blob digest missing")
	}
	dgst, err := digest.Parse(dgstStr)
	if err != nil {
		return errors.New("blob digest parsing failed")
	}
	pbi.blobInfo.Digest = dgst.String()
	pbi.blobInfo.UUID = getUUID(req.URL.Path)
	// The size was recorded in redis by the PATCH interceptor during the
	// chunked upload; GetBlobSize returns 0 when no record exists.
	size, err := util.GetBlobSize(con, pbi.blobInfo.UUID)
	if err != nil {
		return err
	}
	pbi.blobInfo.Size = size
	if err := requireQuota(con, pbi.blobInfo); err != nil {
		return err
	}
	// Stash the blob info in the request context for HandleResponse.
	*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, pbi.blobInfo)))
	return nil
}
// HandleResponse delegates to the shared blob post-processing (commit or roll
// back the reserved quota based on the registry's response status).
func (pbi *PutBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
	if err := HandleBlobCommon(rw, req); err != nil {
		log.Error(err)
	}
}

View File

@ -1,80 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestNewPutBlobInterceptor verifies the constructor returns a non-nil interceptor.
func TestNewPutBlobInterceptor(t *testing.T) {
	blobinfo := &util.BlobInfo{}
	blobinfo.Repository = "TestNewPutBlobInterceptor/latest"
	bi := NewPutBlobInterceptor(blobinfo)
	assert.NotNil(t, bi)
}
// TestPutBlobHandleRequest verifies a request without a digest form value is
// rejected with an error.
func TestPutBlobHandleRequest(t *testing.T) {
	req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
	blobinfo := &util.BlobInfo{}
	blobinfo.Repository = "TestPutBlobHandleRequest/latest"
	bi := NewPutBlobInterceptor(blobinfo)
	assert.NotNil(t, bi.HandleRequest(req))
}
// TestPutBlobHandleResponse exercises HandleResponse end to end. It requires
// a reachable redis instance (see getRedisHost) to take the digest lock first.
func TestPutBlobHandleResponse(t *testing.T) {
	con, err := redis.Dial(
		"tcp",
		fmt.Sprintf("%s:%d", getRedisHost(), 6379),
		redis.DialConnectTimeout(30*time.Second),
		redis.DialReadTimeout(time.Minute+10*time.Second),
		redis.DialWriteTimeout(10*time.Second),
	)
	assert.Nil(t, err)
	defer con.Close()
	req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
	blobInfo := util.BlobInfo{
		Repository:  "TestPutBlobHandleResponse/test",
		Digest:      "sha256:TestPutBlobHandleResponseabcdf12345678sdfefeg1246",
		ContentType: "ContentType",
		Size:        101,
		Exist:       false,
	}
	// 201 Created simulates a successful upload-complete response.
	rw := httptest.NewRecorder()
	customResW := util.CustomResponseWriter{ResponseWriter: rw}
	customResW.WriteHeader(201)
	lock, err := tryLockBlob(con, &blobInfo)
	assert.Nil(t, err)
	blobInfo.DigestLock = lock
	// HandleResponse reads the blob info from the request context.
	*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
	bi := NewPutBlobInterceptor(&blobInfo)
	assert.NotNil(t, bi)
	bi.HandleResponse(customResW, req)
}

View File

@ -1,102 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"bytes"
"context"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/middlewares/util"
"io/ioutil"
"net/http"
"strings"
)
// PutManifestInterceptor ...
type PutManifestInterceptor struct {
blobInfo *util.BlobInfo
mfInfo *util.MfInfo
}
// NewPutManifestInterceptor returns an interceptor that handles manifest push
// (PUT /v2/<name>/manifests/<reference>) for the given blob/manifest info.
func NewPutManifestInterceptor(blobInfo *util.BlobInfo, mfInfo *util.MfInfo) *PutManifestInterceptor {
	return &PutManifestInterceptor{
		blobInfo: blobInfo,
		mfInfo:   mfInfo,
	}
}
// HandleRequest parses the manifest being pushed (schema1 or schema2 only),
// fills in the manifest/blob metadata and reserves storage quota before the
// request is proxied to the registry. Any other Content-Type is rejected.
func (pmi *PutManifestInterceptor) HandleRequest(req *http.Request) error {
	mediaType := req.Header.Get("Content-Type")
	if mediaType == schema1.MediaTypeManifest ||
		mediaType == schema1.MediaTypeSignedManifest ||
		mediaType == schema2.MediaTypeManifest {
		con, err := util.GetRegRedisCon()
		if err != nil {
			log.Infof("failed to get registry redis connection, %v", err)
			return err
		}
		data, err := ioutil.ReadAll(req.Body)
		if err != nil {
			log.Warningf("Error occurred when to copy manifest body %v", err)
			return err
		}
		// Restore the body so the registry still receives the full manifest.
		req.Body = ioutil.NopCloser(bytes.NewBuffer(data))
		manifest, desc, err := distribution.UnmarshalManifest(mediaType, data)
		if err != nil {
			log.Warningf("Error occurred when to Unmarshal Manifest %v", err)
			return err
		}
		// The project name is the first path segment of the repository.
		projectID, err := util.GetProjectID(strings.Split(pmi.mfInfo.Repository, "/")[0])
		if err != nil {
			log.Warningf("Error occurred when to get project ID %v", err)
			return err
		}
		pmi.mfInfo.ProjectID = projectID
		// NOTE: "Refrerence" is a (misspelled) field declared in the util
		// package; renaming it would break other users of util.MfInfo.
		pmi.mfInfo.Refrerence = manifest.References()
		pmi.mfInfo.Digest = desc.Digest.String()
		pmi.blobInfo.ProjectID = projectID
		pmi.blobInfo.Digest = desc.Digest.String()
		pmi.blobInfo.Size = desc.Size
		pmi.blobInfo.ContentType = mediaType
		if err := requireQuota(con, pmi.blobInfo); err != nil {
			return err
		}
		// Stash both infos in the context for the response-side handlers.
		*req = *(req.WithContext(context.WithValue(req.Context(), util.MFInfokKey, pmi.mfInfo)))
		*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, pmi.blobInfo)))
		return nil
	}
	return fmt.Errorf("unsupported content type for manifest: %s", mediaType)
}
// HandleResponse delegates to the shared blob post-processing (commit or roll
// back the reserved quota based on the registry's response status).
func (pmi *PutManifestInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) {
	if err := HandleBlobCommon(rw, req); err != nil {
		log.Error(err)
		return
	}
}

View File

@ -1,92 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"context"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestNewPutManifestInterceptor verifies the constructor returns a non-nil interceptor.
func TestNewPutManifestInterceptor(t *testing.T) {
	blobinfo := &util.BlobInfo{}
	blobinfo.Repository = "TestNewPutManifestInterceptor/latest"
	mfinfo := &util.MfInfo{
		Repository: "TestNewPutManifestInterceptor",
	}
	mi := NewPutManifestInterceptor(blobinfo, mfinfo)
	assert.NotNil(t, mi)
}
// TestPutManifestHandleRequest verifies a request without a supported
// manifest Content-Type is rejected with an error.
func TestPutManifestHandleRequest(t *testing.T) {
	req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
	blobinfo := &util.BlobInfo{}
	blobinfo.Repository = "TestPutManifestHandleRequest/latest"
	mfinfo := &util.MfInfo{
		Repository: "TestPutManifestHandleRequest",
	}
	mi := NewPutManifestInterceptor(blobinfo, mfinfo)
	assert.NotNil(t, mi.HandleRequest(req))
}
// TestPutManifestHandleResponse exercises HandleResponse end to end. It
// requires a reachable redis instance (see getRedisHost) to take the digest
// lock first.
func TestPutManifestHandleResponse(t *testing.T) {
	con, err := redis.Dial(
		"tcp",
		fmt.Sprintf("%s:%d", getRedisHost(), 6379),
		redis.DialConnectTimeout(30*time.Second),
		redis.DialReadTimeout(time.Minute+10*time.Second),
		redis.DialWriteTimeout(10*time.Second),
	)
	assert.Nil(t, err)
	defer con.Close()
	req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
	blobInfo := util.BlobInfo{
		Repository:  "TestPutManifestandleResponse/test",
		Digest:      "sha256:TestPutManifestandleResponseabcdf12345678sdfefeg1246",
		ContentType: "ContentType",
		Size:        101,
		Exist:       false,
	}
	mfinfo := util.MfInfo{
		Repository: "TestPutManifestandleResponse",
	}
	// 201 Created simulates a successful manifest push response.
	rw := httptest.NewRecorder()
	customResW := util.CustomResponseWriter{ResponseWriter: rw}
	customResW.WriteHeader(201)
	lock, err := tryLockBlob(con, &blobInfo)
	assert.Nil(t, err)
	blobInfo.DigestLock = lock
	// HandleResponse reads the blob info from the request context.
	*req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo)))
	bi := NewPutManifestInterceptor(&blobInfo, &mfinfo)
	assert.NotNil(t, bi)
	bi.HandleResponse(customResW, req)
}

View File

@ -0,0 +1,330 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sizequota
import (
"errors"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/core/middlewares/util"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/opencontainers/go-digest"
)
var (
	// blobUploadURLRe matches PUT /v2/<name>/blobs/uploads/<uuid>, the blob
	// upload complete endpoint; group 1 is the repository (with a trailing
	// slash), group 2 is the upload UUID.
	blobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/([a-zA-Z0-9-_.=]+)/?$`)
	// initiateBlobUploadURLRe matches POST /v2/<name>/blobs/uploads/, which
	// either starts a new upload or mounts a blob from another repository.
	initiateBlobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/?$`)
)
// parseUploadedBlobSize parse the blob stream upload response and return the size blob uploaded
func parseUploadedBlobSize(w http.ResponseWriter) (int64, error) {
// Range: Range indicating the current progress of the upload.
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload
r := w.Header().Get("Range")
end := strings.Split(r, "-")[1]
size, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return 0, err
}
// docker registry did '-1' in the response
if size > 0 {
size = size + 1
}
return size, nil
}
// setUploadedBlobSize records the current size of a stream upload in redis
// under "upload:<uuid>:size"; returns true when the SET succeeded.
func setUploadedBlobSize(uuid string, size int64) (bool, error) {
	conn, err := util.GetRegRedisCon()
	if err != nil {
		return false, err
	}
	defer conn.Close()
	key := fmt.Sprintf("upload:%s:size", uuid)
	reply, err := redis.String(conn.Do("SET", key, size))
	if err != nil {
		return false, err
	}
	return reply == "OK", nil
}
// getUploadedBlobSize reads the recorded size of a stream upload from redis
// ("upload:<uuid>:size"). A missing key surfaces as a redis error.
func getUploadedBlobSize(uuid string) (int64, error) {
	conn, err := util.GetRegRedisCon()
	if err != nil {
		return 0, err
	}
	defer conn.Close()
	key := fmt.Sprintf("upload:%s:size", uuid)
	size, err := redis.Int64(conn.Do("GET", key))
	if err != nil {
		return 0, err
	}
	return size, nil
}
// parseBlobSize returns the blob size for a blob upload complete request:
// the Content-Length header when it carries a positive value, otherwise the
// size previously recorded in redis for the upload UUID.
func parseBlobSize(req *http.Request, uuid string) (int64, error) {
	if v := req.Header.Get("Content-Length"); v != "" {
		if size, err := strconv.ParseInt(v, 10, 64); err == nil && size != 0 {
			return size, nil
		}
	}
	// Fall back to the size tracked during the chunked upload.
	return getUploadedBlobSize(uuid)
}
// match returns true if request method equal method and path match re
func match(req *http.Request, method string, re *regexp.Regexp) bool {
return req.Method == method && re.MatchString(req.URL.Path)
}
// parseBlobInfoFromComplete builds a BlobInfo from a blob upload complete
// request (PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>): it resolves
// the project by repository name, validates the digest form value and
// determines the size from Content-Length or the redis upload record.
func parseBlobInfoFromComplete(req *http.Request) (*util.BlobInfo, error) {
	if !match(req, http.MethodPut, blobUploadURLRe) {
		return nil, fmt.Errorf("not match url %s for blob upload complete", req.URL.Path)
	}
	s := blobUploadURLRe.FindStringSubmatch(req.URL.Path)
	// Group 1 carries a trailing slash; strip it to get the repository name.
	repository, uuid := s[1][:len(s[1])-1], s[2]
	projectName, _ := utils.ParseRepository(repository)
	project, err := dao.GetProjectByName(projectName)
	if err != nil {
		return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
	}
	if project == nil {
		return nil, fmt.Errorf("project %s not found", projectName)
	}
	dgt, err := digest.Parse(req.FormValue("digest"))
	if err != nil {
		return nil, fmt.Errorf("blob digest invalid for upload %s", uuid)
	}
	size, err := parseBlobSize(req, uuid)
	if err != nil {
		return nil, fmt.Errorf("failed to get content length of blob upload %s, error: %v", uuid, err)
	}
	return &util.BlobInfo{
		ProjectID:  project.ProjectID,
		Repository: repository,
		Digest:     dgt.String(),
		Size:       size,
	}, nil
}
// parseBlobInfoFromManifest builds a BlobInfo for the manifest itself from a
// manifest push request, parsing the manifest once and caching the result in
// the request context for downstream handlers.
func parseBlobInfoFromManifest(req *http.Request) (*util.BlobInfo, error) {
	info, ok := util.ManifestInfoFromContext(req.Context())
	if !ok {
		manifest, err := util.ParseManifestInfo(req)
		if err != nil {
			return nil, err
		}
		info = manifest
		// replace the request with manifest info
		*req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
	}
	return &util.BlobInfo{
		ProjectID:   info.ProjectID,
		Repository:  info.Repository,
		Digest:      info.Descriptor.Digest.String(),
		Size:        info.Descriptor.Size,
		ContentType: info.Descriptor.MediaType,
	}, nil
}
// parseBlobInfoFromMount builds a BlobInfo from a blob mount request
// (POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository>). The blob
// must already exist in the database since mounting copies no data.
func parseBlobInfoFromMount(req *http.Request) (*util.BlobInfo, error) {
	if !match(req, http.MethodPost, initiateBlobUploadURLRe) {
		return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path)
	}
	if req.FormValue("mount") == "" || req.FormValue("from") == "" {
		return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path)
	}
	dgt, err := digest.Parse(req.FormValue("mount"))
	if err != nil {
		return nil, errors.New("mount must be digest")
	}
	s := initiateBlobUploadURLRe.FindStringSubmatch(req.URL.Path)
	repository := strings.TrimSuffix(s[1], "/")
	projectName, _ := utils.ParseRepository(repository)
	project, err := dao.GetProjectByName(projectName)
	if err != nil {
		return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
	}
	if project == nil {
		return nil, fmt.Errorf("project %s not found", projectName)
	}
	blob, err := dao.GetBlob(dgt.String())
	if err != nil {
		return nil, fmt.Errorf("failed to get blob %s, error: %v", dgt.String(), err)
	}
	if blob == nil {
		return nil, fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", dgt.String())
	}
	return &util.BlobInfo{
		ProjectID:  project.ProjectID,
		Repository: repository,
		Digest:     dgt.String(),
		Size:       blob.Size,
	}, nil
}
// getBlobInfoParser selects the blob-info parser for a request:
// parseBlobInfoFromComplete for PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>,
// parseBlobInfoFromMount for POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repo>,
// or nil when the request is neither.
func getBlobInfoParser(req *http.Request) func(*http.Request) (*util.BlobInfo, error) {
	switch {
	case match(req, http.MethodPut, blobUploadURLRe) &&
		req.FormValue("digest") != "":
		return parseBlobInfoFromComplete
	case match(req, http.MethodPost, initiateBlobUploadURLRe) &&
		req.FormValue("mount") != "" && req.FormValue("from") != "":
		return parseBlobInfoFromMount
	default:
		return nil
	}
}
// computeResourcesForBlob returns the storage required for the blob carried
// in the request context; nil (no resources) when the blob already exists in
// the project, an error when no blob info is in the context.
func computeResourcesForBlob(req *http.Request) (types.ResourceList, error) {
	info, ok := util.BlobInfoFromContext(req.Context())
	if !ok {
		return nil, errors.New("blob info missing")
	}
	exist, err := info.BlobExists()
	if err != nil {
		return nil, err
	}
	// Already present in the project: mounting/re-push consumes no quota.
	if exist {
		return nil, nil
	}
	return types.ResourceList{types.ResourceStorage: info.Size}, nil
}
// computeResourcesForManifestCreation returns storage resource required for manifest
// no storage required if manifest exists in project
// the sum size of manifest itself and blobs not in project will return if manifest not exists in project
func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) {
	info, ok := util.ManifestInfoFromContext(req.Context())
	if !ok {
		return nil, errors.New("manifest info missing")
	}
	exist, err := info.ManifestExists()
	if err != nil {
		return nil, err
	}
	// manifest exist in project, so no storage quota required
	if exist {
		return nil, nil
	}
	blobs, err := info.GetBlobsNotInProject()
	if err != nil {
		return nil, err
	}
	// Charge the manifest blob itself plus every referenced blob that the
	// project does not already hold.
	size := info.Descriptor.Size
	for _, blob := range blobs {
		size += blob.Size
	}
	return types.ResourceList{types.ResourceStorage: size}, nil
}
// computeResourcesForManifestDeletion returns storage resource will be released when manifest deleted
// then result will be the sum of manifest itself and blobs which will not be used by other manifests of project
func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) {
	info, ok := util.ManifestInfoFromContext(req.Context())
	if !ok {
		return nil, errors.New("manifest info missing")
	}
	// Blobs referenced only by this manifest within the repository; stored on
	// the info so the deletion handler can release them later.
	blobs, err := dao.GetExclusiveBlobs(info.ProjectID, info.Repository, info.Digest)
	if err != nil {
		return nil, err
	}
	info.ExclusiveBlobs = blobs
	blob, err := dao.GetBlob(info.Digest)
	if err != nil {
		return nil, err
	}
	// NOTE(review): presumably dao.GetBlob never returns (nil, nil) for a
	// manifest that is being deleted; if it can, blob.Size below would panic
	// — confirm against the dao implementation.
	// manifest size will always be released
	size := blob.Size
	for _, blob := range blobs {
		size = size + blob.Size
	}
	return types.ResourceList{types.ResourceStorage: size}, nil
}
// syncBlobInfoToProject ensures a blob row exists for the digest and links it
// to the blob's project.
func syncBlobInfoToProject(info *util.BlobInfo) error {
	record := &models.Blob{
		Digest:       info.Digest,
		ContentType:  info.ContentType,
		Size:         info.Size,
		CreationTime: time.Now(),
	}
	_, blob, err := dao.GetOrCreateBlob(record)
	if err != nil {
		return err
	}
	_, err = dao.AddBlobToProject(blob.ID, info.ProjectID)
	return err
}

View File

@ -15,51 +15,49 @@
package util
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/clair"
"github.com/goharbor/harbor/src/common/utils/log"
common_redis "github.com/goharbor/harbor/src/common/utils/redis"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/promgr"
"github.com/goharbor/harbor/src/pkg/scan/whitelist"
"github.com/opencontainers/go-digest"
)
type contextKey string
// ErrRequireQuota ...
var ErrRequireQuota = errors.New("cannot get quota on project for request")
const (
manifestURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`
blobURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/`
chartVersionInfoKey = contextKey("ChartVersionInfo")
// ImageInfoCtxKey the context key for image information
ImageInfoCtxKey = contextKey("ImageInfo")
// TokenUsername ...
// TODO: temp solution, remove after vmware/harbor#2242 is resolved.
TokenUsername = "harbor-core"
// MFInfokKey the context key for image tag redis lock
MFInfokKey = contextKey("ManifestInfo")
// BBInfokKey the context key for image tag redis lock
BBInfokKey = contextKey("BlobInfo")
// blobInfoKey the context key for blob info
blobInfoKey = contextKey("BlobInfo")
// chartVersionInfoKey the context key for chart version info
chartVersionInfoKey = contextKey("ChartVersionInfo")
// manifestInfoKey the context key for manifest info
manifestInfoKey = contextKey("ManifestInfo")
// DialConnectionTimeout ...
DialConnectionTimeout = 30 * time.Second
@ -69,6 +67,10 @@ const (
DialWriteTimeout = 10 * time.Second
)
var (
manifestURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`)
)
// ChartVersionInfo ...
type ChartVersionInfo struct {
ProjectID int64
@ -77,6 +79,13 @@ type ChartVersionInfo struct {
Version string
}
// MutexKey returns the redis mutex key for the chart version, optionally
// extended with the given suffix segments.
func (info *ChartVersionInfo) MutexKey(suffix ...string) string {
	parts := []string{"quota", info.Namespace, "chart", info.ChartName, "version", info.Version}
	parts = append(parts, suffix...)
	return strings.Join(parts, ":")
}
// ImageInfo ...
type ImageInfo struct {
Repository string
@ -87,46 +96,147 @@ type ImageInfo struct {
// BlobInfo ...
type BlobInfo struct {
UUID string
ProjectID int64
ContentType string
Size int64
Repository string
Tag string
Digest string
// Exist is to index the existing of the manifest in DB. If false, it's an new image for uploading.
Exist bool
Digest string
DigestLock *common_redis.Mutex
// Quota is the resource applied for the manifest upload request.
Quota *quota.ResourceList
blobExist bool
blobExistErr error
blobExistOnce sync.Once
}
// MfInfo ...
type MfInfo struct {
// BlobExists returns true when blob exists in the project.
// The DB lookup runs at most once per BlobInfo; the result is cached.
func (info *BlobInfo) BlobExists() (bool, error) {
	info.blobExistOnce.Do(func() {
		info.blobExist, info.blobExistErr = dao.HasBlobInProject(info.ProjectID, info.Digest)
	})
	return info.blobExist, info.blobExistErr
}
// MutexKey returns the redis mutex key for the blob, optionally extended
// with the given suffix segments.
func (info *BlobInfo) MutexKey(suffix ...string) string {
	projectName, _ := utils.ParseRepository(info.Repository)
	parts := []string{"quota", projectName, "blob", info.Digest}
	parts = append(parts, suffix...)
	return strings.Join(parts, ":")
}
// ManifestInfo ...
type ManifestInfo struct {
// basic information of a manifest
ProjectID int64
Repository string
Tag string
Digest string
// Exist is to index the existing of the manifest in DB. If false, it's an new image for uploading.
Exist bool
References []distribution.Descriptor
Descriptor distribution.Descriptor
// ArtifactID is the ID of the artifact which query by repository and tag
ArtifactID int64
// manifestExist is to index the existing of the manifest in DB by (repository, digest)
manifestExist bool
manifestExistErr error
manifestExistOnce sync.Once
// DigestChanged true means the manifest exists but digest is changed.
// Probably it's a new image with existing repo/tag name or overwrite.
DigestChanged bool
// artifact the artifact indexed by (repository, tag) in DB
artifact *models.Artifact
artifactErr error
artifactOnce sync.Once
// used to block multiple push on same image.
TagLock *common_redis.Mutex
Refrerence []distribution.Descriptor
// ExclusiveBlobs include the blobs that belong to the manifest only
// and exclude the blobs that shared by other manifests in the same repo(project/repository).
ExclusiveBlobs []*models.Blob
}
// Quota is the resource applied for the manifest upload request.
Quota *quota.ResourceList
// MutexKey returns the redis mutex key of the manifest: keyed by tag when one
// is present (PUT /v2/<name>/manifests/<reference>), otherwise by digest.
func (info *ManifestInfo) MutexKey(suffix ...string) string {
	projectName, _ := utils.ParseRepository(info.Repository)
	var a []string
	if info.Tag != "" {
		// tag not empty happened in PUT /v2/<name>/manifests/<reference>
		// lock by to tag to compute the count resource required by quota
		a = []string{"quota", projectName, "manifest", info.Tag}
	} else {
		a = []string{"quota", projectName, "manifest", info.Digest}
	}
	return strings.Join(append(a, suffix...), ":")
}
// BlobMutexKey returns the redis mutex key for a blob referenced by the
// manifest, optionally extended with the given suffix segments.
func (info *ManifestInfo) BlobMutexKey(blob *models.Blob, suffix ...string) string {
	projectName, _ := utils.ParseRepository(info.Repository)
	a := []string{"quota", projectName, "blob", blob.Digest}
	return strings.Join(append(a, suffix...), ":")
}
// GetBlobsNotInProject returns the manifest's referenced blobs that are not
// yet associated with the project.
func (info *ManifestInfo) GetBlobsNotInProject() ([]*models.Blob, error) {
	digests := make([]string, 0, len(info.References))
	for _, reference := range info.References {
		digests = append(digests, reference.Digest.String())
	}
	return dao.GetBlobsNotInProject(info.ProjectID, digests...)
}
// fetchArtifact looks up the artifact by (repository, tag), running the DB
// query at most once per ManifestInfo and caching the result.
func (info *ManifestInfo) fetchArtifact() (*models.Artifact, error) {
	info.artifactOnce.Do(func() {
		info.artifact, info.artifactErr = dao.GetArtifact(info.Repository, info.Tag)
	})
	return info.artifact, info.artifactErr
}
// IsNewTag returns true if the tag of the manifest not exists in project.
// A lookup error is treated the same as "not found".
func (info *ManifestInfo) IsNewTag() bool {
	artifact, _ := info.fetchArtifact()
	return artifact == nil
}
// Artifact builds the artifact record for the manifest. When an artifact with
// the same (repository, tag) already exists, its ID and creation time are
// preserved and the push time is refreshed, so saving performs an update.
func (info *ManifestInfo) Artifact() *models.Artifact {
	result := &models.Artifact{
		PID:    info.ProjectID,
		Repo:   info.Repository,
		Tag:    info.Tag,
		Digest: info.Digest,
		Kind:   "Docker-Image",
	}
	if artifact, _ := info.fetchArtifact(); artifact != nil {
		result.ID = artifact.ID
		result.CreationTime = artifact.CreationTime
		result.PushTime = time.Now()
	}
	return result
}
// ManifestExists returns true if manifest exist in repository, matching by
// (project, repository, digest). The DB query runs at most once per
// ManifestInfo; the result is cached.
func (info *ManifestInfo) ManifestExists() (bool, error) {
	info.manifestExistOnce.Do(func() {
		total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{
			PID:    info.ProjectID,
			Repo:   info.Repository,
			Digest: info.Digest,
		})
		info.manifestExist = total > 0
		info.manifestExistErr = err
	})
	return info.manifestExist, info.manifestExistErr
}
// JSONError wraps a concrete Code and Message, it's readable for docker deamon.
@ -156,12 +266,7 @@ func MarshalError(code, msg string) string {
// MatchManifestURL ...
func MatchManifestURL(req *http.Request) (bool, string, string) {
re, err := regexp.Compile(manifestURLPattern)
if err != nil {
log.Errorf("error to match manifest url, %v", err)
return false, "", ""
}
s := re.FindStringSubmatch(req.URL.Path)
s := manifestURLRe.FindStringSubmatch(req.URL.Path)
if len(s) == 3 {
s[1] = strings.TrimSuffix(s[1], "/")
return true, s[1], s[2]
@ -169,42 +274,6 @@ func MatchManifestURL(req *http.Request) (bool, string, string) {
return false, "", ""
}
// MatchPutBlobURL reports whether the request is a PUT on a blob upload URL;
// on a match the repository name (without trailing slash) is returned.
// NOTE(review): the pattern is compiled on every call; it could be hoisted to
// a package-level regexp.MustCompile like manifestURLRe.
func MatchPutBlobURL(req *http.Request) (bool, string) {
	if req.Method != http.MethodPut {
		return false, ""
	}
	re, err := regexp.Compile(blobURLPattern)
	if err != nil {
		log.Errorf("error to match put blob url, %v", err)
		return false, ""
	}
	s := re.FindStringSubmatch(req.URL.Path)
	if len(s) == 2 {
		s[1] = strings.TrimSuffix(s[1], "/")
		return true, s[1]
	}
	return false, ""
}
// MatchPatchBlobURL reports whether the request is a PATCH on a blob upload
// URL; on a match the repository name (without trailing slash) is returned.
func MatchPatchBlobURL(req *http.Request) (bool, string) {
	if req.Method != http.MethodPatch {
		return false, ""
	}
	re, err := regexp.Compile(blobURLPattern)
	if err != nil {
		// BUG FIX: the log message was copy-pasted from MatchPutBlobURL and
		// wrongly said "put blob" in the patch matcher.
		log.Errorf("error to match patch blob url, %v", err)
		return false, ""
	}
	s := re.FindStringSubmatch(req.URL.Path)
	if len(s) == 2 {
		s[1] = strings.TrimSuffix(s[1], "/")
		return true, s[1]
	}
	return false, ""
}
// MatchPullManifest checks if the request looks like a request to pull manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values
func MatchPullManifest(req *http.Request) (bool, string, string) {
if req.Method != http.MethodGet {
@ -221,31 +290,21 @@ func MatchPushManifest(req *http.Request) (bool, string, string) {
return MatchManifestURL(req)
}
// MatchMountBlobURL POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
// If match, will return repo, mount and from as the 2nd, 3th and 4th.
func MatchMountBlobURL(req *http.Request) (bool, string, string, string) {
if req.Method != http.MethodPost {
return false, "", "", ""
// MatchDeleteManifest checks if the request
func MatchDeleteManifest(req *http.Request) (match bool, repository string, reference string) {
if req.Method != http.MethodDelete {
return
}
re, err := regexp.Compile(blobURLPattern)
if err != nil {
log.Errorf("error to match post blob url, %v", err)
return false, "", "", ""
match, repository, reference = MatchManifestURL(req)
if _, err := digest.Parse(reference); err != nil {
// Delete manifest only accept digest as reference
match = false
return
}
s := re.FindStringSubmatch(req.URL.Path)
if len(s) == 2 {
s[1] = strings.TrimSuffix(s[1], "/")
mount := req.FormValue("mount")
if mount == "" {
return false, "", "", ""
}
from := req.FormValue("from")
if from == "" {
return false, "", "", ""
}
return true, s[1], mount, from
}
return false, "", "", ""
return
}
// CopyResp ...
@ -318,72 +377,6 @@ func GetPolicyChecker() PolicyChecker {
return NewPMSPolicyChecker(config.GlobalProjectMgr)
}
// TryRequireQuota reserves the given resources against the project's quota.
// Returns ErrRequireQuota when the quota manager refuses the addition.
func TryRequireQuota(projectID int64, quotaRes *quota.ResourceList) error {
	quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
	if err != nil {
		log.Errorf("Error occurred when to new quota manager %v", err)
		return err
	}
	if err := quotaMgr.AddResources(*quotaRes); err != nil {
		log.Errorf("cannot get quota for the project resource: %d, err: %v", projectID, err)
		return ErrRequireQuota
	}
	return nil
}
// TryFreeQuota releases previously reserved resources for a project; used to
// roll back on failure cases. Returns true only when the release succeeded.
func TryFreeQuota(projectID int64, qres *quota.ResourceList) bool {
	mgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
	if err != nil {
		log.Errorf("Error occurred when to new quota manager %v", err)
		return false
	}
	err = mgr.SubtractResources(*qres)
	if err != nil {
		log.Errorf("cannot release quota for the project resource: %d, err: %v", projectID, err)
	}
	return err == nil
}
// GetBlobSize returns the blob size recorded in redis under the given UUID.
// A UUID with no entry reports size 0 with no error.
func GetBlobSize(conn redis.Conn, uuid string) (int64, error) {
	found, err := redis.Int(conn.Do("EXISTS", uuid))
	if err != nil {
		return 0, err
	}
	if found != 1 {
		// Nothing recorded for this upload yet.
		return 0, nil
	}
	// redis.Int64 returns (0, err) on failure, matching the contract above.
	return redis.Int64(conn.Do("GET", uuid))
}
// SetBunkSize sets the temp size for a blob bunk keyed by its uuid.
// Returns true when redis acknowledged the SET with "OK".
func SetBunkSize(conn redis.Conn, uuid string, size int64) (bool, error) {
	reply, err := redis.String(conn.Do("SET", uuid, size))
	if err != nil {
		return false, err
	}
	return reply == "OK", nil
}
// GetProjectID looks up the ID of the project with the given name, returning
// an error when the lookup fails or no such project exists.
func GetProjectID(name string) (int64, error) {
	project, err := dao.GetProjectByName(name)
	switch {
	case err != nil:
		return 0, err
	case project == nil:
		return 0, fmt.Errorf("project %s is not found", name)
	default:
		return project.ProjectID, nil
	}
}
// GetRegRedisCon ...
func GetRegRedisCon() (redis.Conn, error) {
// FOR UT
@ -406,7 +399,7 @@ func GetRegRedisCon() (redis.Conn, error) {
// BlobInfoFromContext returns blob info from context
func BlobInfoFromContext(ctx context.Context) (*BlobInfo, bool) {
info, ok := ctx.Value(BBInfokKey).(*BlobInfo)
info, ok := ctx.Value(blobInfoKey).(*BlobInfo)
return info, ok
}
@ -423,14 +416,14 @@ func ImageInfoFromContext(ctx context.Context) (*ImageInfo, bool) {
}
// ManifestInfoFromContext returns manifest info from context
func ManifestInfoFromContext(ctx context.Context) (*MfInfo, bool) {
info, ok := ctx.Value(MFInfokKey).(*MfInfo)
func ManifestInfoFromContext(ctx context.Context) (*ManifestInfo, bool) {
info, ok := ctx.Value(manifestInfoKey).(*ManifestInfo)
return info, ok
}
// NewBlobInfoContext returns context with blob info
func NewBlobInfoContext(ctx context.Context, info *BlobInfo) context.Context {
return context.WithValue(ctx, BBInfokKey, info)
return context.WithValue(ctx, blobInfoKey, info)
}
// NewChartVersionInfoContext returns context with blob info
@ -444,6 +437,92 @@ func NewImageInfoContext(ctx context.Context, info *ImageInfo) context.Context {
}
// NewManifestInfoContext returns context with manifest info
func NewManifestInfoContext(ctx context.Context, info *MfInfo) context.Context {
return context.WithValue(ctx, MFInfokKey, info)
func NewManifestInfoContext(ctx context.Context, info *ManifestInfo) context.Context {
return context.WithValue(ctx, manifestInfoKey, info)
}
// ParseManifestInfo parses the manifest carried in the request body and
// returns the assembled ManifestInfo: project ID, repository, tag (when the
// reference is not a digest), digest, references and descriptor.
// The request body is restored after reading so downstream handlers can
// consume it again.
func ParseManifestInfo(req *http.Request) (*ManifestInfo, error) {
	match, repository, reference := MatchManifestURL(req)
	if !match {
		return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path)
	}

	var tag string
	if _, err := digest.Parse(reference); err != nil {
		// The reference is not a digest, so it must be a tag.
		tag = reference
	}

	mediaType := req.Header.Get("Content-Type")
	switch mediaType {
	case schema1.MediaTypeManifest, schema1.MediaTypeSignedManifest, schema2.MediaTypeManifest:
		// Supported manifest media type — continue.
	default:
		return nil, fmt.Errorf("unsupported content type for manifest: %s", mediaType)
	}

	if req.Body == nil {
		return nil, fmt.Errorf("body missing")
	}
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		log.Warningf("Error occurred when to copy manifest body %v", err)
		return nil, err
	}
	// Put the bytes back so the request can be forwarded as-is.
	req.Body = ioutil.NopCloser(bytes.NewBuffer(body))

	manifest, desc, err := distribution.UnmarshalManifest(mediaType, body)
	if err != nil {
		log.Warningf("Error occurred when to Unmarshal Manifest %v", err)
		return nil, err
	}

	projectName, _ := utils.ParseRepository(repository)
	project, err := dao.GetProjectByName(projectName)
	if err != nil {
		return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
	}
	if project == nil {
		return nil, fmt.Errorf("project %s not found", projectName)
	}

	return &ManifestInfo{
		ProjectID:  project.ProjectID,
		Repository: repository,
		Tag:        tag,
		Digest:     desc.Digest.String(),
		References: manifest.References(),
		Descriptor: desc,
	}, nil
}
// ParseManifestInfoFromPath builds a ManifestInfo from the request path only
// (no body required): project ID, repository, and either tag or digest
// depending on whether the reference parses as a digest.
func ParseManifestInfoFromPath(req *http.Request) (*ManifestInfo, error) {
	match, repository, reference := MatchManifestURL(req)
	if !match {
		return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path)
	}

	projectName, _ := utils.ParseRepository(repository)
	project, err := dao.GetProjectByName(projectName)
	if err != nil {
		return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
	}
	if project == nil {
		return nil, fmt.Errorf("project %s not found", projectName)
	}

	info := &ManifestInfo{
		ProjectID:  project.ProjectID,
		Repository: repository,
	}
	if dgt, err := digest.Parse(reference); err == nil {
		info.Digest = dgt.String()
	} else {
		// Not a digest — treat the reference as a tag.
		info.Tag = reference
	}
	return info, nil
}

View File

@ -15,33 +15,31 @@
package util
import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
testutils "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/quota"
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"reflect"
"testing"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
testutils "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/config"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var endpoint = "10.117.4.142"
var notaryServer *httptest.Server
const testingRedisHost = "REDIS_HOST"
var admiralEndpoint = "http://127.0.0.1:8282"
var token = ""
func TestMain(m *testing.M) {
testutils.InitDatabaseFromEnv()
notaryServer = notarytest.NewNotaryServer(endpoint)
@ -99,56 +97,6 @@ func TestMatchPullManifest(t *testing.T) {
assert.Equal("sha256:ca4626b691f57d16ce1576231e4a2e2135554d32e13a85dcff380d51fdd13f6a", tag7)
}
// TestMatchPutBlob verifies that only PUT requests on the blob-upload URL are
// recognized as "put blob", and that the repository name is extracted.
func TestMatchPutBlob(t *testing.T) {
	assert := assert.New(t)

	putReq, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil)
	ok, repo := MatchPutBlobURL(putReq)
	assert.True(ok, "%s %v is not a request to put blob", putReq.Method, putReq.URL)
	assert.Equal("library/ubuntu", repo)

	// Wrong method: PATCH must not match.
	patchReq, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/blobs/uploads/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil)
	ok, _ = MatchPutBlobURL(patchReq)
	assert.False(ok, "%s %v is a request to put blob", patchReq.Method, patchReq.URL)

	// Wrong path: "manifest" segment must not match the blob pattern.
	manifestReq, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/manifest/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil)
	ok, _ = MatchPutBlobURL(manifestReq)
	assert.False(ok, "%s %v is not a request to put blob", manifestReq.Method, manifestReq.URL)
}
// TestMatchMountBlobURL verifies that only POST requests on the blob-upload
// URL with mount/from query parameters are recognized as "mount blob".
func TestMatchMountBlobURL(t *testing.T) {
	assert := assert.New(t)

	postReq, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
	ok, repo, mount, from := MatchMountBlobURL(postReq)
	assert.True(ok, "%s %v is not a request to mount blob", postReq.Method, postReq.URL)
	assert.Equal("library/ubuntu", repo)
	assert.Equal("digtest123", mount)
	assert.Equal("testrepo", from)

	// Wrong method: PATCH must not match.
	patchReq, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
	ok, _, _, _ = MatchMountBlobURL(patchReq)
	assert.False(ok, "%s %v is a request to mount blob", patchReq.Method, patchReq.URL)

	// Wrong method: PUT must not match either.
	putReq, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
	ok, _, _, _ = MatchMountBlobURL(putReq)
	assert.False(ok, "%s %v is not a request to put blob", putReq.Method, putReq.URL)
}
// TestPatchBlobURL verifies that only PATCH requests on the blob-upload URL
// are recognized as "patch blob".
func TestPatchBlobURL(t *testing.T) {
	assert := assert.New(t)

	patchReq, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/1234-1234-abcd", nil)
	ok, repo := MatchPatchBlobURL(patchReq)
	assert.True(ok, "%s %v is not a request to patch blob", patchReq.Method, patchReq.URL)
	assert.Equal("library/ubuntu", repo)

	// Wrong method: POST must not match.
	postReq, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/1234-1234-abcd", nil)
	ok, _ = MatchPatchBlobURL(postReq)
	assert.False(ok, "%s %v is a request to patch blob", postReq.Method, postReq.URL)

	// Wrong method: PUT must not match either.
	putReq, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil)
	ok, _ = MatchPatchBlobURL(putReq)
	assert.False(ok, "%s %v is not a request to patch blob", putReq.Method, putReq.URL)
}
func TestMatchPushManifest(t *testing.T) {
assert := assert.New(t)
req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil)
@ -260,83 +208,194 @@ func TestMarshalError(t *testing.T) {
assert.Equal("{\"errors\":[{\"code\":\"DENIED\",\"message\":\"The action is denied\",\"detail\":\"The action is denied\"}]}", js2)
}
// TestTryRequireQuota reserves storage under project 1's quota and expects
// the reservation to succeed against the test database.
func TestTryRequireQuota(t *testing.T) {
	res := &quota.ResourceList{
		quota.ResourceStorage: 100,
	}
	assert.Nil(t, TryRequireQuota(1, res))
}
// TestTryFreeQuota releases storage from project 1's quota and expects the
// release to succeed against the test database.
func TestTryFreeQuota(t *testing.T) {
	res := &quota.ResourceList{
		quota.ResourceStorage: 1,
	}
	assert.True(t, TryFreeQuota(1, res))
}
// TestGetBlobSize checks that a UUID with no entry in redis reports size 0
// without error.
func TestGetBlobSize(t *testing.T) {
	conn, err := redis.Dial(
		"tcp",
		fmt.Sprintf("%s:%d", getRedisHost(), 6379),
		redis.DialConnectTimeout(30*time.Second),
		redis.DialReadTimeout(time.Minute+10*time.Second),
		redis.DialWriteTimeout(10*time.Second),
	)
	assert.Nil(t, err)
	defer conn.Close()

	size, err := GetBlobSize(conn, "test-TestGetBlobSize")
	assert.Nil(t, err)
	assert.Equal(t, size, int64(0))
}
// TestSetBunkSize stores a blob-chunk size in redis and reads it back,
// confirming the size transitions from 0 (unset) to the stored value.
func TestSetBunkSize(t *testing.T) {
	conn, err := redis.Dial(
		"tcp",
		fmt.Sprintf("%s:%d", getRedisHost(), 6379),
		redis.DialConnectTimeout(30*time.Second),
		redis.DialReadTimeout(time.Minute+10*time.Second),
		redis.DialWriteTimeout(10*time.Second),
	)
	assert.Nil(t, err)
	defer conn.Close()

	// The key starts out unset, so the reported size is zero.
	before, err := GetBlobSize(conn, "TestSetBunkSize")
	assert.Nil(t, err)
	assert.Equal(t, before, int64(0))

	_, err = SetBunkSize(conn, "TestSetBunkSize", 123)
	assert.Nil(t, err)

	after, err := GetBlobSize(conn, "TestSetBunkSize")
	assert.Nil(t, err)
	assert.Equal(t, after, int64(123))
}
func TestGetProjectID(t *testing.T) {
name := "project_for_TestGetProjectID"
project := models.Project{
OwnerID: 1,
Name: name,
func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest {
manifest := schema2.Manifest{
Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest},
Config: distribution.Descriptor{
MediaType: schema2.MediaTypeImageConfig,
Size: configSize,
Digest: digest.FromString(utils.GenerateRandomString()),
},
}
id, err := dao.AddProject(project)
if err != nil {
t.Fatalf("failed to add project: %v", err)
for _, size := range layerSizes {
manifest.Layers = append(manifest.Layers, distribution.Descriptor{
MediaType: schema2.MediaTypeLayer,
Size: size,
Digest: digest.FromString(utils.GenerateRandomString()),
})
}
idget, err := GetProjectID(name)
assert.Nil(t, err)
assert.Equal(t, id, idget)
return manifest
}
func getRedisHost() string {
redisHost := os.Getenv(testingRedisHost)
if redisHost == "" {
redisHost = "127.0.0.1" // for local test
// getDescriptor marshals the manifest and returns the descriptor the registry
// library computes for it (digest, size, media type). Errors are ignored
// because the test fixtures are always well-formed.
func getDescriptor(manifest schema2.Manifest) distribution.Descriptor {
	payload, _ := json.Marshal(manifest)
	_, desc, _ := distribution.UnmarshalManifest(manifest.Versioned.MediaType, payload)
	return desc
}
// TestParseManifestInfo exercises ParseManifestInfo with a table of requests
// covering the happy path plus each validation failure: unsupported content
// type, unparseable body, missing body, unknown project, and non-manifest URL.
func TestParseManifestInfo(t *testing.T) {
	manifest := makeManifest(1, []int64{2, 3, 4})

	tests := []struct {
		name    string
		req     func() *http.Request // builds the request under test
		want    *ManifestInfo        // expected result; nil when an error is expected
		wantErr bool
	}{
		{
			"ok",
			func() *http.Request {
				buf, _ := json.Marshal(manifest)
				req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifests/latest", bytes.NewReader(buf))
				req.Header.Add("Content-Type", manifest.MediaType)
				return req
			},
			&ManifestInfo{
				ProjectID:  1,
				Repository: "library/photon",
				Tag:        "latest",
				Digest:     getDescriptor(manifest).Digest.String(),
				References: manifest.References(),
				Descriptor: getDescriptor(manifest),
			},
			false,
		},
		{
			// A Content-Type outside the supported manifest media types is rejected.
			"bad content type",
			func() *http.Request {
				buf, _ := json.Marshal(manifest)
				req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf))
				req.Header.Add("Content-Type", "application/json")
				return req
			},
			nil,
			true,
		},
		{
			// An empty body cannot be unmarshaled into a manifest.
			"bad manifest",
			func() *http.Request {
				req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader([]byte("")))
				req.Header.Add("Content-Type", schema2.MediaTypeManifest)
				return req
			},
			nil,
			true,
		},
		{
			// A nil body is rejected before any parsing happens.
			"body missing",
			func() *http.Request {
				req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", nil)
				req.Header.Add("Content-Type", schema2.MediaTypeManifest)
				return req
			},
			nil,
			true,
		},
		{
			// The "notfound" project does not exist in the test database.
			"project not found",
			func() *http.Request {
				buf, _ := json.Marshal(manifest)
				req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf))
				req.Header.Add("Content-Type", manifest.MediaType)
				return req
			},
			nil,
			true,
		},
		{
			// "manifest" (singular) is not a valid manifests URL segment.
			"url not match",
			func() *http.Request {
				buf, _ := json.Marshal(manifest)
				req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifest/latest", bytes.NewReader(buf))
				req.Header.Add("Content-Type", manifest.MediaType)
				return req
			},
			nil,
			true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ParseManifestInfo(tt.req())
			if (err != nil) != tt.wantErr {
				t.Errorf("ParseManifestInfo() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ParseManifestInfo() = %v, want %v", got, tt.want)
			}
		})
	}
}
func TestParseManifestInfoFromPath(t *testing.T) {
mustRequest := func(method, url string) *http.Request {
req, _ := http.NewRequest(method, url, nil)
return req
}
return redisHost
type args struct {
req *http.Request
}
tests := []struct {
name string
args args
want *ManifestInfo
wantErr bool
}{
{
"ok for digest",
args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059")},
&ManifestInfo{
ProjectID: 1,
Repository: "library/photon",
Digest: "sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059",
},
false,
},
{
"ok for tag",
args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/latest")},
&ManifestInfo{
ProjectID: 1,
Repository: "library/photon",
Tag: "latest",
},
false,
},
{
"project not found",
args{mustRequest(http.MethodDelete, "/v2/notfound/photon/manifests/latest")},
nil,
true,
},
{
"url not match",
args{mustRequest(http.MethodDelete, "/v2/library/photon/manifest/latest")},
nil,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ParseManifestInfoFromPath(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("ParseManifestInfoFromPath() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ParseManifestInfoFromPath() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -134,6 +134,7 @@ func initRouters() {
beego.Router("/api/internal/syncregistry", &api.InternalAPI{}, "post:SyncRegistry")
beego.Router("/api/internal/renameadmin", &api.InternalAPI{}, "post:RenameAdmin")
beego.Router("/api/internal/switchquota", &api.InternalAPI{}, "put:SwitchQuota")
// external service that hosted on harbor process:
beego.Router("/service/notifications", &registry.NotificationHandler{})

View File

@ -33,12 +33,11 @@ import (
var statusMap = map[string]string{
job.JobServiceStatusPending: models.JobPending,
job.JobServiceStatusScheduled: models.JobScheduled,
job.JobServiceStatusRunning: models.JobRunning,
job.JobServiceStatusStopped: models.JobStopped,
job.JobServiceStatusCancelled: models.JobCanceled,
job.JobServiceStatusError: models.JobError,
job.JobServiceStatusSuccess: models.JobFinished,
job.JobServiceStatusScheduled: models.JobScheduled,
}
// Handler handles reqeust on /service/notifications/jobs/*, which listens to the webhook of jobservice.

View File

@ -112,7 +112,7 @@ func (n *NotificationHandler) Post() {
}()
}
if !coreutils.WaitForManifestReady(repository, tag, 5) {
if !coreutils.WaitForManifestReady(repository, tag, 6) {
log.Errorf("Manifest for image %s:%s is not ready, skip the follow up actions.", repository, tag)
return
}

View File

@ -62,14 +62,19 @@ func newRepositoryClient(endpoint, username, repository string) (*registry.Repos
// WaitForManifestReady implements exponential sleeep to wait until manifest is ready in registry.
// This is a workaround for https://github.com/docker/distribution/issues/2625
func WaitForManifestReady(repository string, tag string, maxRetry int) bool {
// The initial wait interval, hard-coded to 50ms
interval := 50 * time.Millisecond
// The initial wait interval, hard-coded to 80ms, interval will be 80ms,200ms,500ms,1.25s,3.124999936s
interval := 80 * time.Millisecond
repoClient, err := NewRepositoryClientForUI("harbor-core", repository)
if err != nil {
log.Errorf("Failed to create repo client.")
return false
}
for i := 0; i < maxRetry; i++ {
if i != 0 {
log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval)
time.Sleep(interval)
interval = time.Duration(int64(float32(interval) * 2.5))
}
_, exist, err := repoClient.ManifestExist(tag)
if err != nil {
log.Errorf("Unexpected error when checking manifest existence, image: %s:%s, error: %v", repository, tag, err)
@ -78,9 +83,6 @@ func WaitForManifestReady(repository string, tag string, maxRetry int) bool {
if exist {
return true
}
log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval)
time.Sleep(interval)
interval = interval * 2
}
return false
}

View File

@ -67,7 +67,7 @@ a.underline, .underline{
<span class="status-text">Page Not Found</span>
</div>
<div class="status-subtitle">
<p> <a href="/harbor" class="underline">Home</p>
<p><a href="/harbor" class="underline">Home</a></p>
</div>
</div>
</div>

View File

@ -58,12 +58,13 @@ func (e *evaluator) Action() string {
func New(params rule.Parameters) rule.Evaluator {
if params != nil {
if p, ok := params[ParameterN]; ok {
if v, ok := p.(int); ok && v >= 0 {
return &evaluator{n: v}
if v, ok := p.(float64); ok && v >= 0 {
return &evaluator{n: int(v)}
}
}
}
log.Debugf("default parameter %d used for rule %s", DefaultN, TemplateID)
log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID)
return &evaluator{n: DefaultN}
}

View File

@ -15,7 +15,7 @@
package dayspl
import (
"strconv"
"fmt"
"testing"
"time"
@ -36,8 +36,8 @@ func (e *EvaluatorTestSuite) TestNew() {
args rule.Parameters
expectedN int
}{
{Name: "Valid", args: map[string]rule.Parameter{ParameterN: 5}, expectedN: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: -1}, expectedN: DefaultN},
{Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN},
{Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN},
{Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN},
}
@ -65,7 +65,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
tests := []struct {
n int
n float64
expected int
minPullTime int64
}{
@ -80,7 +80,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
for _, tt := range tests {
e.T().Run(strconv.Itoa(tt.n), func(t *testing.T) {
e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
sut := New(map[string]rule.Parameter{ParameterN: tt.n})
result, err := sut.Process(data)

View File

@ -58,12 +58,13 @@ func (e *evaluator) Action() string {
func New(params rule.Parameters) rule.Evaluator {
if params != nil {
if p, ok := params[ParameterN]; ok {
if v, ok := p.(int); ok && v >= 0 {
return &evaluator{n: v}
if v, ok := p.(float64); ok && v >= 0 {
return &evaluator{n: int(v)}
}
}
}
log.Debugf("default parameter %d used for rule %s", DefaultN, TemplateID)
log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID)
return &evaluator{n: DefaultN}
}

View File

@ -15,7 +15,7 @@
package daysps
import (
"strconv"
"fmt"
"testing"
"time"
@ -36,8 +36,8 @@ func (e *EvaluatorTestSuite) TestNew() {
args rule.Parameters
expectedN int
}{
{Name: "Valid", args: map[string]rule.Parameter{ParameterN: 5}, expectedN: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: -1}, expectedN: DefaultN},
{Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN},
{Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN},
{Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN},
}
@ -65,7 +65,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
tests := []struct {
n int
n float64
expected int
minPushTime int64
}{
@ -80,7 +80,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
for _, tt := range tests {
e.T().Run(strconv.Itoa(tt.n), func(t *testing.T) {
e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
sut := New(map[string]rule.Parameter{ParameterN: tt.n})
result, err := sut.Process(data)

View File

@ -59,15 +59,15 @@ func (e *evaluator) Action() string {
func New(params rule.Parameters) rule.Evaluator {
if params != nil {
if param, ok := params[ParameterX]; ok {
if v, ok := param.(int); ok && v >= 0 {
if v, ok := param.(float64); ok && v >= 0 {
return &evaluator{
x: v,
x: int(v),
}
}
}
}
log.Debugf("default parameter %d used for rule %s", DefaultX, TemplateID)
log.Warningf("default parameter %d used for rule %s", DefaultX, TemplateID)
return &evaluator{
x: DefaultX,

View File

@ -21,8 +21,8 @@ func (e *EvaluatorTestSuite) TestNew() {
args rule.Parameters
expectedX int
}{
{Name: "Valid", args: map[string]rule.Parameter{ParameterX: 3}, expectedX: 3},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterX: -3}, expectedX: DefaultX},
{Name: "Valid", args: map[string]rule.Parameter{ParameterX: float64(3)}, expectedX: 3},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterX: float64(-3)}, expectedX: DefaultX},
{Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedX: DefaultX},
{Name: "Default If Wrong Type", args: map[string]rule.Parameter{}, expectedX: DefaultX},
}
@ -48,7 +48,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
tests := []struct {
days int
days float64
expected int
}{
{days: 0, expected: 0},
@ -62,7 +62,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
for _, tt := range tests {
e.T().Run(fmt.Sprintf("%d days - should keep %d", tt.days, tt.expected), func(t *testing.T) {
e.T().Run(fmt.Sprintf("%v days - should keep %d", tt.days, tt.expected), func(t *testing.T) {
e := New(rule.Parameters{ParameterX: tt.days})
result, err := e.Process(data)

View File

@ -65,9 +65,9 @@ func (e *evaluator) Action() string {
func New(params rule.Parameters) rule.Evaluator {
if params != nil {
if param, ok := params[ParameterK]; ok {
if v, ok := param.(int); ok && v >= 0 {
if v, ok := param.(float64); ok && v >= 0 {
return &evaluator{
k: v,
k: int(v),
}
}
}

View File

@ -15,7 +15,7 @@
package latestk
import (
"strconv"
"fmt"
"testing"
"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
@ -58,7 +58,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
{k: 99, expected: len(e.artifacts)},
}
for _, tt := range tests {
e.T().Run(strconv.Itoa(tt.k), func(t *testing.T) {
e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) {
sut := &evaluator{k: tt.k}
result, err := sut.Process(e.artifacts)
@ -79,8 +79,8 @@ func (e *EvaluatorTestSuite) TestNew() {
params rule.Parameters
expectedK int
}{
{name: "Valid", params: rule.Parameters{ParameterK: 5}, expectedK: 5},
{name: "Default If Negative", params: rule.Parameters{ParameterK: -5}, expectedK: DefaultK},
{name: "Valid", params: rule.Parameters{ParameterK: float64(5)}, expectedK: 5},
{name: "Default If Negative", params: rule.Parameters{ParameterK: float64(-5)}, expectedK: DefaultK},
{name: "Default If Wrong Type", params: rule.Parameters{ParameterK: "5"}, expectedK: DefaultK},
{name: "Default If Wrong Key", params: rule.Parameters{"n": 5}, expectedK: DefaultK},
{name: "Default If Empty", params: rule.Parameters{}, expectedK: DefaultK},

View File

@ -59,13 +59,13 @@ func (e *evaluator) Action() string {
func New(params rule.Parameters) rule.Evaluator {
if params != nil {
if p, ok := params[ParameterN]; ok {
if v, ok := p.(int); ok && v >= 0 {
return &evaluator{n: v}
if v, ok := p.(float64); ok && v >= 0 {
return &evaluator{n: int(v)}
}
}
}
log.Debugf("default parameter %d used for rule %s", DefaultN, TemplateID)
log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID)
return &evaluator{n: DefaultN}
}

View File

@ -15,8 +15,8 @@
package latestpl
import (
"fmt"
"math/rand"
"strconv"
"testing"
"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
@ -35,8 +35,8 @@ func (e *EvaluatorTestSuite) TestNew() {
args rule.Parameters
expectedK int
}{
{Name: "Valid", args: map[string]rule.Parameter{ParameterN: 5}, expectedK: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: -1}, expectedK: DefaultN},
{Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedK: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedK: DefaultN},
{Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultN},
{Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedK: DefaultN},
}
@ -57,7 +57,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
})
tests := []struct {
n int
n float64
expected int
minPullTime int64
}{
@ -69,7 +69,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
for _, tt := range tests {
e.T().Run(strconv.Itoa(tt.n), func(t *testing.T) {
e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
ev := New(map[string]rule.Parameter{ParameterN: tt.n})
result, err := ev.Process(data)

View File

@ -62,15 +62,15 @@ func (e *evaluator) Action() string {
func New(params rule.Parameters) rule.Evaluator {
if params != nil {
if param, ok := params[ParameterK]; ok {
if v, ok := param.(int); ok && v >= 0 {
if v, ok := param.(float64); ok && v >= 0 {
return &evaluator{
k: v,
k: int(v),
}
}
}
}
log.Debugf("default parameter %d used for rule %s", DefaultK, TemplateID)
log.Warningf("default parameter %d used for rule %s", DefaultK, TemplateID)
return &evaluator{
k: DefaultK,

View File

@ -1,8 +1,8 @@
package latestps
import (
"fmt"
"math/rand"
"strconv"
"testing"
"github.com/stretchr/testify/suite"
@ -22,8 +22,8 @@ func (e *EvaluatorTestSuite) TestNew() {
args rule.Parameters
expectedK int
}{
{Name: "Valid", args: map[string]rule.Parameter{ParameterK: 5}, expectedK: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterK: -1}, expectedK: DefaultK},
{Name: "Valid", args: map[string]rule.Parameter{ParameterK: float64(5)}, expectedK: 5},
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterK: float64(-1)}, expectedK: DefaultK},
{Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultK},
{Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterK: "foo"}, expectedK: DefaultK},
}
@ -44,7 +44,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
})
tests := []struct {
k int
k float64
expected int
}{
{k: 0, expected: 0},
@ -55,7 +55,7 @@ func (e *EvaluatorTestSuite) TestProcess() {
}
for _, tt := range tests {
e.T().Run(strconv.Itoa(tt.k), func(t *testing.T) {
e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) {
e := New(map[string]rule.Parameter{ParameterK: tt.k})
result, err := e.Process(data)

View File

@ -87,6 +87,7 @@ export class Configuration {
token_expiration: NumberValueItem;
scan_all_policy: ComplexValueItem;
read_only: BoolValueItem;
notification_enable: BoolValueItem;
http_authproxy_endpoint?: StringValueItem;
http_authproxy_tokenreview_endpoint?: StringValueItem;
http_authproxy_verify_cert?: BoolValueItem;
@ -140,6 +141,7 @@ export class Configuration {
}
}, true);
this.read_only = new BoolValueItem(false, true);
this.notification_enable = new BoolValueItem(false, true);
this.http_authproxy_endpoint = new StringValueItem("", true);
this.http_authproxy_tokenreview_endpoint = new StringValueItem("", true);
this.http_authproxy_verify_cert = new BoolValueItem(false, true);

View File

@ -4,6 +4,7 @@ import { GcJobViewModel } from "../gcLog";
import { GcViewModelFactory } from "../gc.viewmodel.factory";
import { ErrorHandler } from "../../../error-handler/index";
import { Subscription, timer } from "rxjs";
import { REFRESH_TIME_DIFFERENCE } from '../../../shared/shared.const';
const JOB_STATUS = {
PENDING: "pending",
RUNNING: "running"
@ -34,7 +35,7 @@ export class GcHistoryComponent implements OnInit, OnDestroy {
this.loading = false;
// to avoid some jobs not finished.
if (!this.timerDelay) {
this.timerDelay = timer(3000, 3000).subscribe(() => {
this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => {
let count: number = 0;
this.jobs.forEach(job => {
if (

View File

@ -25,8 +25,8 @@
<span class="tooltip-content">{{'PROJECT.QUOTA_UNLIMIT_TIP' | translate }}</span>
</a>
<div class="progress-block progress-min-width progress-div" *ngIf="!defaultTextsObj.isSystemDefaultQuota">
<div class="progress success"
[class.danger]="getDangerStyle(+quotaHardLimitValue.countLimit, quotaHardLimitValue.countUsed)">
<div class="progress success" [class.warning]="isWarningColor(+quotaHardLimitValue.countLimit, quotaHardLimitValue.countUsed)"
[class.danger]="isDangerColor(+quotaHardLimitValue.countLimit, quotaHardLimitValue.countUsed)">
<progress value="{{countInput.invalid || +quotaHardLimitValue.countLimit===-1?0:quotaHardLimitValue.countUsed}}"
max="{{countInput.invalid?100:quotaHardLimitValue.countLimit}}" data-displayval="100%"></progress>
</div>
@ -60,8 +60,9 @@
<span class="tooltip-content">{{'PROJECT.QUOTA_UNLIMIT_TIP' | translate }}</span>
</a>
<div class="progress-block progress-min-width progress-div" *ngIf="!defaultTextsObj.isSystemDefaultQuota">
<div class="progress success" [class.danger]="getDangerStyle(+quotaHardLimitValue.storageLimit,quotaHardLimitValue.storageUsed, quotaHardLimitValue.storageUnit)">
<progress value="{{storageInput.invalid?0:quotaHardLimitValue.storageUsed}}"
<div class="progress success" [class.danger]="isDangerColor(+quotaHardLimitValue.storageLimit,quotaHardLimitValue.storageUsed, quotaHardLimitValue.storageUnit)"
[class.warning]="isWarningColor(+quotaHardLimitValue.storageLimit,quotaHardLimitValue.storageUsed, quotaHardLimitValue.storageUnit)">
<progress value="{{storageInput.invalid || +quotaHardLimitValue.storageLimit === -1 ?0:quotaHardLimitValue.storageUsed}}"
max="{{storageInput.invalid?0:getByte(+quotaHardLimitValue.storageLimit, quotaHardLimitValue.storageUnit)}}"
data-displayval="100%"></progress>
</div>

View File

@ -1,21 +1,25 @@
::ng-deep .modal-dialog {
width: 25rem;
}
.modal-body {
padding-top: 0.8rem;
overflow-y: visible;
overflow-x: visible;
.clr-form-compact {
div.form-group {
padding-left: 8.5rem;
.mr-3px {
margin-right: 3px;
}
.quota-input {
width: 2rem;
padding-right: 0.8rem;
}
.select-div {
width: 2.5rem;
@ -51,6 +55,22 @@
width: 9rem;
}
::ng-deep {
.progress {
&.warning>progress {
color: orange;
&::-webkit-progress-value {
background-color: orange;
}
&::-moz-progress-bar {
background-color: orange;
}
}
}
}
.progress-label {
position: absolute;
right: -2.3rem;

View File

@ -6,13 +6,10 @@ import {
OnInit,
} from '@angular/core';
import { NgForm, Validators } from '@angular/forms';
import { ActivatedRoute } from "@angular/router";
import { TranslateService } from '@ngx-translate/core';
import { InlineAlertComponent } from '../../../inline-alert/inline-alert.component';
import { QuotaUnits, QuotaUnlimited } from "../../../shared/shared.const";
import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from "../../../shared/shared.const";
import { clone, getSuitableUnit, getByte, GetIntegerAndUnit, validateLimit } from '../../../utils';
import { EditQuotaQuotaInterface, QuotaHardLimitInterface } from '../../../service';
@ -47,9 +44,9 @@ export class EditProjectQuotasComponent implements OnInit {
@ViewChild('quotaForm')
currentForm: NgForm;
@Output() confirmAction = new EventEmitter();
constructor(
private translateService: TranslateService,
private route: ActivatedRoute) { }
quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT;
quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT;
constructor() { }
ngOnInit() {
}
@ -134,10 +131,18 @@ export class EditProjectQuotasComponent implements OnInit {
}
return 0;
}
getDangerStyle(limit: number | string, used: number | string, unit?: string) {
isDangerColor(limit: number | string, used: number | string, unit?: string) {
if (unit) {
return limit !== QuotaUnlimited ? +used / getByte(+limit, unit) > 0.9 : false;
return limit !== QuotaUnlimited ? +used / getByte(+limit, unit) >= this.quotaDangerCoefficient : false;
}
return limit !== QuotaUnlimited ? +used / +limit > 0.9 : false;
return limit !== QuotaUnlimited ? +used / +limit >= this.quotaDangerCoefficient : false;
}
isWarningColor(limit: number | string, used: number | string, unit?: string) {
if (unit) {
return limit !== QuotaUnlimited ?
+used / getByte(+limit, unit) >= this.quotaWarningCoefficient && +used / getByte(+limit, unit) <= this.quotaDangerCoefficient : false;
}
return limit !== QuotaUnlimited ?
+used / +limit >= this.quotaWarningCoefficient && +used / +limit <= this.quotaDangerCoefficient : false;
}
}

View File

@ -37,7 +37,9 @@
<clr-dg-cell>
<div class="progress-block progress-min-width">
<div class="progress success"
[class.danger]="quota.hard.count!==-1?quota.used.count/quota.hard.count>0.9:false">
[class.danger]="quota.hard.count!==-1?quota.used.count/quota.hard.count>quotaDangerCoefficient:false"
[class.warning]="quota.hard.count!==-1?quota.used.count/quota.hard.count<=quotaDangerCoefficient &&quota.used.count/quota.hard.count>=quotaWarningCoefficient:false"
>
<progress value="{{quota.hard.count===-1? 0 : quota.used.count}}"
max="{{quota.hard.count}}" data-displayval="100%"></progress>
</div>
@ -48,7 +50,9 @@
<clr-dg-cell>
<div class="progress-block progress-min-width">
<div class="progress success"
[class.danger]="quota.hard.storage!==-1?quota.used.storage/quota.hard.storage>0.9:false">
[class.danger]="quota.hard.storage!==-1?quota.used.storage/quota.hard.storage>quotaDangerCoefficient:false"
[class.warning]="quota.hard.storage!==-1?quota.used.storage/quota.hard.storage>=quotaWarningCoefficient&&quota.used.storage/quota.hard.storage<=quotaDangerCoefficient:false"
>
<progress value="{{quota.hard.storage===-1? 0 : quota.used.storage}}"
max="{{quota.hard.storage}}" data-displayval="100%"></progress>
</div>

View File

@ -8,7 +8,7 @@ import {
, getByte, GetIntegerAndUnit
} from '../../utils';
import { ErrorHandler } from '../../error-handler/index';
import { QuotaUnits, QuotaUnlimited } from '../../shared/shared.const';
import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from '../../shared/shared.const';
import { EditProjectQuotasComponent } from './edit-project-quotas/edit-project-quotas.component';
import {
ConfigurationService
@ -46,6 +46,8 @@ export class ProjectQuotasComponent implements OnChanges {
currentPage = 1;
totalCount = 0;
pageSize = 15;
quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT;
quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT;
@Input()
get allConfig(): Configuration {
return this.config;

View File

@ -142,9 +142,21 @@
</div>
</div>
</div>
</div>
</div>
<div class="form-group">
<label for="webhookNotificationEnabled">{{'CONFIG.WEBHOOK_NOTIFICATION_ENABLED' | translate}}</label>
<clr-checkbox-wrapper>
<input type="checkbox" clrCheckbox name="webhookNotificationEnabled" id="webhookNotificationEnabled" [ngModel]="systemSettings.notification_enable.value"
(ngModelChange)="setWebhookNotificationEnabledValue($event)" [ngModel]="systemSettings.notification_enable.value"/>
<label>
<a href="javascript:void(0)" role="tooltip" aria-haspopup="true" class="tooltip tooltip-top-right read-tooltip">
<clr-icon shape="info-circle" class="info-tips-icon" size="24"></clr-icon>
<span class="tooltip-content">{{'CONFIG.TOOLTIP.WEBHOOK_TOOLTIP' | translate}}</span>
</a>
</label>
</clr-checkbox-wrapper>
</div>
</section>
</form>
<div>

View File

@ -108,7 +108,7 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
let changes = {};
for (let prop in allChanges) {
if (prop === 'token_expiration' || prop === 'read_only' || prop === 'project_creation_restriction'
|| prop === 'robot_token_duration') {
|| prop === 'robot_token_duration' || prop === 'notification_enable') {
changes[prop] = allChanges[prop];
}
}
@ -119,6 +119,10 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
this.systemSettings.read_only.value = $event;
}
setWebhookNotificationEnabledValue($event: any) {
this.systemSettings.notification_enable.value = $event;
}
disabled(prop: any): boolean {
return !(prop && prop.editable);
}

View File

@ -23,10 +23,18 @@
<button type="button" class="btn btn-primary" (click)="cancel()">{{'BUTTON.CLOSE' | translate}}</button>
</ng-template>
<ng-template [ngSwitchCase]="4">
<button type="button" class="btn btn-outline" (click)="cancel()">{{'BUTTON.CANCEL' | translate}}</button>
<button type="button" class="btn btn-primary" (click)="confirm()">{{'BUTTON.ENABLE' | translate}}</button>
</ng-template>
<ng-template [ngSwitchCase]="5">
<button type="button" class="btn btn-outline" (click)="cancel()">{{'BUTTON.CANCEL' | translate}}</button>
<button type="button" class="btn btn-danger" (click)="confirm()">{{'BUTTON.DISABLE' | translate}}</button>
</ng-template>
<ng-template [ngSwitchCase]="6">
<button type="button" class="btn btn-outline" (click)="cancel()" [hidden]="isDelete">{{'BUTTON.CANCEL' | translate}}</button>
<button type="button" class="btn btn-primary" (click)="confirm()" [hidden]="isDelete">{{'BUTTON.REPLICATE' | translate}}</button>
</ng-template>
<ng-template [ngSwitchCase]="5">
<ng-template [ngSwitchCase]="7">
<button type="button" class="btn btn-outline" (click)="cancel()" [hidden]="isDelete">{{'BUTTON.CANCEL' | translate}}</button>
<button type="button" class="btn btn-primary" (click)="confirm()" [hidden]="isDelete">{{'BUTTON.STOP' | translate}}</button>
</ng-template>

View File

@ -203,7 +203,7 @@ describe('RecentLogComponent (inline template)', () => {
fixture.detectChanges();
expect(component.recentLogs).toBeTruthy();
expect(component.logsCache).toBeTruthy();
expect(component.recentLogs.length).toEqual(3);
expect(component.recentLogs.length).toEqual(15);
});
});

View File

@ -67,7 +67,8 @@ export class RecentLogComponent implements OnInit {
}
public doFilter(terms: string): void {
if (!terms) {
// allow search by null characters
if (terms === undefined || terms === null) {
return;
}
this.currentTerm = terms.trim();

View File

@ -92,7 +92,7 @@
</span>
</div>
</div>
<clr-datagrid [(clrDgSelected)]="selectedRow" [clrDgLoading]="loading">
<clr-datagrid (clrDgRefresh)="clrLoadTasks($event)" [clrDgLoading]="loading">
<clr-dg-column [clrDgSortBy]="'id'">{{'REPLICATION.TASK_ID'| translate}}</clr-dg-column>
<clr-dg-column [clrDgField]="'resource_type'" class="resource-width">{{'REPLICATION.RESOURCE_TYPE' | translate}}</clr-dg-column>
<clr-dg-column [clrDgField]="'src_resource'">{{'REPLICATION.SOURCE' | translate}}</clr-dg-column>
@ -102,7 +102,7 @@
<clr-dg-column [clrDgSortBy]="startTimeComparator">{{'REPLICATION.CREATION_TIME' | translate}}</clr-dg-column>
<clr-dg-column [clrDgSortBy]="endTimeComparator">{{'REPLICATION.END_TIME' | translate}}</clr-dg-column>
<clr-dg-column>{{'REPLICATION.LOGS' | translate}}</clr-dg-column>
<clr-dg-row *clrDgItems="let t of tasks">
<clr-dg-row *ngFor="let t of tasks">
<clr-dg-cell>{{t.id}}</clr-dg-cell>
<clr-dg-cell class="resource-width">{{t.resource_type}}</clr-dg-cell>
<clr-dg-cell>{{t.src_resource}}</clr-dg-cell>
@ -118,8 +118,8 @@
</clr-dg-cell>
</clr-dg-row>
<clr-dg-footer>
<span *ngIf="pagination.totalItems">{{pagination.firstItem + 1}} - {{pagination.lastItem +1 }} {{'REPLICATION.OF' | translate}} </span>{{pagination.totalItems }} {{'REPLICATION.ITEMS' | translate}}
<clr-dg-pagination #pagination [(clrDgPage)]="currentPage" [clrDgPageSize]="pageSize"></clr-dg-pagination>
<span *ngIf="totalCount">{{pagination.firstItem + 1}} - {{pagination.lastItem +1 }} {{'REPLICATION.OF' | translate}} </span>{{totalCount }} {{'REPLICATION.ITEMS' | translate}}
<clr-dg-pagination #pagination [(clrDgPage)]="currentPage" [clrDgTotalItems]="totalCount" [clrDgPageSize]="pageSize"></clr-dg-pagination>
</clr-dg-footer>
</clr-datagrid>
</div>

View File

@ -6,8 +6,9 @@ import { finalize } from "rxjs/operators";
import { Subscription, timer } from "rxjs";
import { ErrorHandler } from "../../error-handler/error-handler";
import { ReplicationJob, ReplicationTasks, Comparator, ReplicationJobItem, State } from "../../service/interface";
import { CustomComparator, DEFAULT_PAGE_SIZE, calculatePage, doFiltering, doSorting } from "../../utils";
import { CustomComparator, DEFAULT_PAGE_SIZE } from "../../utils";
import { RequestQueryParams } from "../../service/RequestQueryParams";
import { REFRESH_TIME_DIFFERENCE } from '../../shared/shared.const';
const executionStatus = 'InProgress';
@Component({
selector: 'replication-tasks',
@ -18,8 +19,8 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy {
isOpenFilterTag: boolean;
inProgress: boolean = false;
currentPage: number = 1;
selectedRow: [];
pageSize: number = DEFAULT_PAGE_SIZE;
totalCount: number;
loading = true;
searchTask: string;
defaultFilter = "resource_type";
@ -47,7 +48,6 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy {
ngOnInit(): void {
this.searchTask = '';
this.getExecutionDetail();
this.clrLoadTasks();
}
getExecutionDetail(): void {
@ -67,14 +67,17 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy {
clrLoadPage(): void {
if (!this.timerDelay) {
this.timerDelay = timer(10000, 10000).subscribe(() => {
this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => {
let count: number = 0;
if (this.executions['status'] === executionStatus) {
count++;
}
if (this.executions['status'] === executionStatus) {
count++;
}
if (count > 0) {
this.getExecutionDetail();
this.clrLoadTasks();
let state: State = {
page: {}
};
this.clrLoadTasks(state);
} else {
this.timerDelay.unsubscribe();
this.timerDelay = null;
@ -136,16 +139,30 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy {
}
}
clrLoadTasks(): void {
this.loading = true;
clrLoadTasks(state: State): void {
if (!state || !state.page || !this.executionId) {
return;
}
let params: RequestQueryParams = new RequestQueryParams();
params = params.set('page_size', this.pageSize + '').set('page', this.currentPage + '');
if (this.searchTask && this.searchTask !== "") {
params = params.set(this.defaultFilter, this.searchTask);
}
this.loading = true;
this.replicationService.getReplicationTasks(this.executionId, params)
.pipe(finalize(() => (this.loading = false)))
.pipe(finalize(() => {
this.loading = false;
}))
.subscribe(res => {
this.tasks = res; // Keep the data
if (res.headers) {
let xHeader: string = res.headers.get("X-Total-Count");
if (xHeader) {
this.totalCount = parseInt(xHeader, 0);
}
}
this.tasks = res.body; // Keep the data
},
error => {
this.errorHandler.error(error);
@ -162,23 +179,20 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy {
// refresh icon
refreshTasks(): void {
this.loading = true;
this.currentPage = 1;
this.replicationService.getReplicationTasks(this.executionId)
.subscribe(res => {
this.tasks = res;
this.loading = false;
},
error => {
this.loading = false;
this.errorHandler.error(error);
});
let state: State = {
page: {}
};
this.clrLoadTasks(state);
}
public doSearch(value: string): void {
this.currentPage = 1;
this.searchTask = value.trim();
this.loading = true;
this.clrLoadTasks();
let state: State = {
page: {}
};
this.clrLoadTasks(state);
}
openFilter(isOpen: boolean): void {

View File

@ -48,7 +48,8 @@ import {
import {
ConfirmationTargets,
ConfirmationButtons,
ConfirmationState
ConfirmationState,
REFRESH_TIME_DIFFERENCE
} from "../shared/shared.const";
import { ConfirmationMessage } from "../confirmation-dialog/confirmation-message";
import { ConfirmationDialogComponent } from "../confirmation-dialog/confirmation-dialog.component";
@ -214,7 +215,7 @@ export class ReplicationComponent implements OnInit, OnDestroy {
this.totalCount = response.metadata.xTotalCount;
this.jobs = response.data;
if (!this.timerDelay) {
this.timerDelay = timer(10000, 10000).subscribe(() => {
this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => {
let count: number = 0;
this.jobs.forEach(job => {
if (

View File

@ -66,6 +66,8 @@ export interface Tag extends Base {
signature?: string;
scan_overview?: VulnerabilitySummary;
labels: Label[];
push_time?: string;
pull_time?: string;
}
/**

View File

@ -144,5 +144,12 @@ export const USERSTATICPERMISSION = {
"PUSH": "push"
}
},
"WEBHOOK": {
"KEY": "notification-policy",
"VALUE": {
"LIST": "list",
"READ": "read",
}
},
};

View File

@ -296,8 +296,7 @@ export class ReplicationDefaultService extends ReplicationService {
}
let url: string = `${this._replicateUrl}/executions/${executionId}/tasks`;
return this.http
.get(url,
queryParams ? buildHttpRequestOptions(queryParams) : HTTP_GET_OPTIONS)
.get(url, buildHttpRequestOptionsWithObserveResponse(queryParams))
.pipe(map(response => response as ReplicationTasks)
, catchError(error => observableThrowError(error)));
}

Some files were not shown because too many files have changed in this diff Show More