diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 01071390b..e62ecaecc 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -1263,11 +1263,16 @@ paths: type: string required: true description: Relevant repository name. - - name: label_ids + - name: label_id in: query type: string required: false - description: A list of comma separated label IDs. + description: A label ID. + - name: detail + in: query + type: boolean + required: false + description: Bool value indicating whether return detailed information of the tag, such as vulnerability scan info, if set to false, only tag name is returned. tags: - Products responses: @@ -3684,7 +3689,7 @@ paths: description: Unexpected internal errors. '/projects/{project_id}/webhook/policies': get: - sumary: List project webhook policies. + summary: List project webhook policies. description: | This endpoint returns webhook policies of a project. parameters: @@ -3712,7 +3717,7 @@ paths: '500': description: Unexpected internal errors. post: - sumary: Create project webhook policy. + summary: Create project webhook policy. description: | This endpoint create a webhook policy if the project does not have one. parameters: @@ -3757,7 +3762,7 @@ paths: in: path description: The id of webhook policy. required: true - type: int64 + type: integer format: int64 tags: - Products @@ -3791,7 +3796,7 @@ paths: in: path description: The id of webhook policy. required: true - type: int64 + type: integer format: int64 - name: policy in: body @@ -3829,7 +3834,7 @@ paths: in: path description: The id of webhook policy. required: true - type: int64 + type: integer format: int64 tags: - Products @@ -3908,7 +3913,7 @@ paths: description: Internal server errors. '/projects/{project_id}/webhook/jobs': get: - sumary: List project webhook jobs + summary: List project webhook jobs description: | This endpoint returns webhook jobs of a project. parameters: @@ -4083,16 +4088,20 @@ definitions: description: 'The public status of the project. 
The valid values are "true", "false".' enable_content_trust: type: string - description: 'Whether content trust is enabled or not. If it is enabled, user cann''t pull unsigned images from this project. The valid values are "true", "false".' + description: 'Whether content trust is enabled or not. If it is enabled, user can''t pull unsigned images from this project. The valid values are "true", "false".' prevent_vul: type: string description: 'Whether prevent the vulnerable images from running. The valid values are "true", "false".' severity: type: string - description: 'If the vulnerability is high than severity defined here, the images cann''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".' + description: 'If the vulnerability is high than severity defined here, the images can''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".' auto_scan: type: string description: 'Whether scan images automatically when pushing. The valid values are "true", "false".' + reuse_sys_cve_whitelist: + type: string + description: 'Whether this project reuse the system level CVE whitelist as the whitelist of its own. The valid values are "true", "false". + If it is set to "true" the actual whitelist associate with this project, if any, will be ignored.' ProjectSummary: type: object properties: @@ -4841,6 +4850,9 @@ definitions: project_creation_restriction: type: string description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly". + quota_per_project_enable: + type: boolean + description: This attribute indicates whether quota per project enabled in harbor read_only: type: boolean description: '''docker push'' is prohibited by Harbor if you set it to true. ' @@ -4938,6 +4950,9 @@ definitions: project_creation_restriction: $ref: '#/definitions/StringConfigItem' description: This attribute restricts what users have the permission to create project. 
It can be "everyone" or "adminonly". + quota_per_project_enable: + $ref: '#/definitions/BoolConfigItem' + description: This attribute indicates whether quota per project enabled in harbor read_only: $ref: '#/definitions/BoolConfigItem' description: '''docker push'' is prohibited by Harbor if you set it to true. ' @@ -5349,7 +5364,9 @@ definitions: properties: type: type: string - description: The schedule type. The valid values are hourly, daily, weekly, custom and None. 'None' means to cancel the schedule. + description: | + The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manually' and 'None'. + 'Manually' means to trigger it right away and 'None' means to cancel the schedule. cron: type: string description: A cron expression, a time-based job scheduler. @@ -5724,7 +5741,7 @@ definitions: description: The webhook job ID. policy_id: type: integer - fromat: int64 + format: int64 description: The webhook policy ID. event_type: type: string diff --git a/make/harbor.yml b/make/harbor.yml index 347ef0c8c..ba860ffca 100644 --- a/make/harbor.yml +++ b/make/harbor.yml @@ -30,6 +30,11 @@ harbor_admin_password: Harbor12345 database: # The password for the root user of Harbor DB. Change this before any production use. password: root123 + # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained. + max_idle_conns: 50 + # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections. + # Note: the default number of connections is 100 for postgres. + max_open_conns: 100 # The default data volume data_volume: /data @@ -50,18 +55,12 @@ data_volume: /data # disabled: false # Clair configuration -clair: +clair: # The interval of clair updaters, the unit is hour, set to 0 to disable the updaters. updaters_interval: 12 - # Config http proxy for Clair, e.g. 
http://my.proxy.com:3128 - # Clair doesn't need to connect to harbor internal components via http proxy. - http_proxy: - https_proxy: - no_proxy: 127.0.0.1,localhost,core,registry - jobservice: - # Maximum number of job workers in job service + # Maximum number of job workers in job service max_job_workers: 10 notification: @@ -80,8 +79,8 @@ log: local: # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated. rotate_count: 50 - # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes. - # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G + # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes. + # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G # are all valid. rotate_size: 200M # The directory on your host that store log @@ -143,3 +142,20 @@ _version: 1.8.0 # Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert. # uaa: # ca_file: /path/to/ca + +# Global proxy +# Config http proxy for components, e.g. http://my.proxy.com:3128 +# Components doesn't need to connect to each others via http proxy. +# Remove component from `components` array if want disable proxy +# for it. If you want use proxy for replication, MUST enable proxy +# for core and jobservice, and set `http_proxy` and `https_proxy`. +# Add domain to the `no_proxy` field, when you want disable proxy +# for some special registry. 
+proxy: + http_proxy: + https_proxy: + no_proxy: 127.0.0.1,localhost,.local,.internal,log,db,redis,nginx,core,portal,postgresql,jobservice,registry,registryctl,clair + components: + - core + - jobservice + - clair diff --git a/make/migrations/postgresql/0010_1.9.0_schema.up.sql b/make/migrations/postgresql/0010_1.9.0_schema.up.sql index 9303fa79b..7fd3a4241 100644 --- a/make/migrations/postgresql/0010_1.9.0_schema.up.sql +++ b/make/migrations/postgresql/0010_1.9.0_schema.up.sql @@ -23,6 +23,15 @@ CREATE TABLE blob UNIQUE (digest) ); +/* add the table for project and blob */ +CREATE TABLE project_blob ( + id SERIAL PRIMARY KEY NOT NULL, + project_id int NOT NULL, + blob_id int NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP, + CONSTRAINT unique_project_blob UNIQUE (project_id, blob_id) +); + CREATE TABLE artifact ( id SERIAL PRIMARY KEY NOT NULL, diff --git a/make/photon/portal/Dockerfile b/make/photon/portal/Dockerfile index f6adb9bf1..9f71410f7 100644 --- a/make/photon/portal/Dockerfile +++ b/make/photon/portal/Dockerfile @@ -1,7 +1,8 @@ FROM node:10.15.0 as nodeportal COPY src/portal /portal_src -COPY ./docs/swagger.yaml /portal_src +COPY ./docs/swagger.yaml /portal_src +COPY ./LICENSE /portal_src WORKDIR /build_dir @@ -21,6 +22,7 @@ FROM photon:2.0 COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html +COPY --from=nodeportal /build_dir/LICENSE /usr/share/nginx/html COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf diff --git a/make/photon/prepare/g.py b/make/photon/prepare/g.py index f0eab0675..229f61a54 100644 --- a/make/photon/prepare/g.py +++ b/make/photon/prepare/g.py @@ -12,11 +12,12 @@ REDIS_UID = 999 REDIS_GID = 999 ## Global variable +host_root_dir = '/hostfs' + base_dir = '/harbor_make' templates_dir = "/usr/src/app/templates" config_dir = '/config' data_dir = '/data' - secret_dir = 
'/secret' secret_key_dir='/secret/keys' diff --git a/make/photon/prepare/templates/clair/clair_env.jinja b/make/photon/prepare/templates/clair/clair_env.jinja index 038f1a130..3825ca8fb 100644 --- a/make/photon/prepare/templates/clair/clair_env.jinja +++ b/make/photon/prepare/templates/clair/clair_env.jinja @@ -1,3 +1,3 @@ -http_proxy={{clair_http_proxy}} -https_proxy={{clair_https_proxy}} -no_proxy={{clair_no_proxy}} +HTTP_PROXY={{clair_http_proxy}} +HTTPS_PROXY={{clair_https_proxy}} +NO_PROXY={{clair_no_proxy}} diff --git a/make/photon/prepare/templates/core/env.jinja b/make/photon/prepare/templates/core/env.jinja index bc29a505d..d6413678e 100644 --- a/make/photon/prepare/templates/core/env.jinja +++ b/make/photon/prepare/templates/core/env.jinja @@ -15,6 +15,8 @@ POSTGRESQL_USERNAME={{harbor_db_username}} POSTGRESQL_PASSWORD={{harbor_db_password}} POSTGRESQL_DATABASE={{harbor_db_name}} POSTGRESQL_SSLMODE={{harbor_db_sslmode}} +POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}} +POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} REGISTRY_URL={{registry_url}} TOKEN_SERVICE_URL={{token_service_url}} HARBOR_ADMIN_PASSWORD={{harbor_admin_password}} @@ -41,3 +43,7 @@ RELOAD_KEY={{reload_key}} CHART_REPOSITORY_URL={{chart_repository_url}} REGISTRY_CONTROLLER_URL={{registry_controller_url}} WITH_CHARTMUSEUM={{with_chartmuseum}} + +HTTP_PROXY={{core_http_proxy}} +HTTPS_PROXY={{core_https_proxy}} +NO_PROXY={{core_no_proxy}} diff --git a/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja b/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja index 9e70cc8de..cb6785766 100644 --- a/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja +++ b/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja @@ -276,12 +276,7 @@ services: volumes: - ./common/config/nginx:/etc/nginx:z {% if protocol == 'https' %} - - type: bind - source: {{cert_key_path}} - target: /etc/cert/server.key - - type: bind - source: 
{{cert_path}} - target: /etc/cert/server.crt + - {{data_volume}}/secret/cert:/etc/cert:z {% endif %} networks: - harbor diff --git a/make/photon/prepare/templates/jobservice/env.jinja b/make/photon/prepare/templates/jobservice/env.jinja index d9e32c521..c38534f02 100644 --- a/make/photon/prepare/templates/jobservice/env.jinja +++ b/make/photon/prepare/templates/jobservice/env.jinja @@ -2,3 +2,7 @@ CORE_SECRET={{core_secret}} JOBSERVICE_SECRET={{jobservice_secret}} CORE_URL={{core_url}} JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}} + +HTTP_PROXY={{jobservice_http_proxy}} +HTTPS_PROXY={{jobservice_https_proxy}} +NO_PROXY={{jobservice_no_proxy}} diff --git a/make/photon/prepare/utils/configs.py b/make/photon/prepare/utils/configs.py index c57856845..df14a53de 100644 --- a/make/photon/prepare/utils/configs.py +++ b/make/photon/prepare/utils/configs.py @@ -112,6 +112,11 @@ def parse_yaml_config(config_file_path): config_dict['harbor_db_username'] = 'postgres' config_dict['harbor_db_password'] = db_configs.get("password") or '' config_dict['harbor_db_sslmode'] = 'disable' + + default_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns + default_max_open_conns = 0 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns + config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_max_idle_conns + config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_max_open_conns # clari db config_dict['clair_db_host'] = 'postgresql' config_dict['clair_db_port'] = 5432 @@ -171,13 +176,18 @@ def parse_yaml_config(config_file_path): if storage_config.get('redirect'): config_dict['storage_redirect_disabled'] = storage_config['redirect']['disabled'] + # Global proxy configs + proxy_config = configs.get('proxy') or {} + proxy_components = proxy_config.get('components') or [] + for proxy_component in proxy_components: + config_dict[proxy_component + '_http_proxy'] = 
proxy_config.get('http_proxy') or '' + config_dict[proxy_component + '_https_proxy'] = proxy_config.get('https_proxy') or '' + config_dict[proxy_component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry' + # Clair configs, optional clair_configs = configs.get("clair") or {} config_dict['clair_db'] = 'postgres' config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12 - config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or '' - config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or '' - config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry' # Chart configs chart_configs = configs.get("chart") or {} @@ -286,4 +296,4 @@ def parse_yaml_config(config_file_path): # UAA configs config_dict['uaa'] = configs.get('uaa') or {} - return config_dict \ No newline at end of file + return config_dict diff --git a/make/photon/prepare/utils/nginx.py b/make/photon/prepare/utils/nginx.py index 74fc3deab..0d1117448 100644 --- a/make/photon/prepare/utils/nginx.py +++ b/make/photon/prepare/utils/nginx.py @@ -2,11 +2,13 @@ import os, shutil from fnmatch import fnmatch from pathlib import Path -from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID +from g import config_dir, templates_dir, host_root_dir, DEFAULT_GID, DEFAULT_UID, data_dir from utils.misc import prepare_dir, mark_file from utils.jinja import render_jinja from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH +host_ngx_real_cert_dir = Path(os.path.join(data_dir, 'secret', 'cert')) + nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf") nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d") nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja") @@ -20,8 +22,38 @@ def prepare_nginx(config_dict): prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID) render_nginx_template(config_dict) + +def 
prepare_nginx_certs(cert_key_path, cert_path): + """ + Prepare the certs file with proper ownership + 1. Remove nginx cert files in secret dir + 2. Copy cert files on host filesystem to secret dir + 3. Change the permission to 644 and ownership to 10000:10000 + """ + host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/'))) + host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/'))) + + if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir(): + shutil.rmtree(host_ngx_real_cert_dir) + + os.makedirs(host_ngx_real_cert_dir, mode=0o755) + real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key') + real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt') + shutil.copy2(host_ngx_cert_key_path, real_key_path) + shutil.copy2(host_ngx_cert_path, real_crt_path) + + os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID) + mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID) + mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID) + + def render_nginx_template(config_dict): - if config_dict['protocol'] == "https": + """ + 1. render nginx config file through protocol + 2. 
copy additional configs to cert.d dir + """ + if config_dict['protocol'] == 'https': + prepare_nginx_certs(config_dict['cert_key_path'], config_dict['cert_path']) render_jinja( nginx_https_conf_template, nginx_conf, @@ -30,12 +62,7 @@ def render_nginx_template(config_dict): ssl_cert=SSL_CERT_PATH, ssl_cert_key=SSL_CERT_KEY_PATH) location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS - cert_dir = Path(os.path.join(config_dir, 'cert')) - ssl_key_path = Path(os.path.join(cert_dir, 'server.key')) - ssl_crt_path = Path(os.path.join(cert_dir, 'server.crt')) - cert_dir.mkdir(parents=True, exist_ok=True) - ssl_key_path.touch() - ssl_crt_path.touch() + else: render_jinja( nginx_http_conf_template, @@ -45,22 +72,23 @@ def render_nginx_template(config_dict): location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern) -def add_additional_location_config(src, dst): - """ - These conf files is used for user that wanna add additional customized locations to harbor proxy - :params src: source of the file - :params dst: destination file path - """ - if not os.path.isfile(src): - return - print("Copying nginx configuration file {src} to {dst}".format( - src=src, dst=dst)) - shutil.copy2(src, dst) - mark_file(dst, mode=0o644) def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern): if not os.path.exists(src_config_dir): return + + def add_additional_location_config(src, dst): + """ + These conf files is used for user that wanna add additional customized locations to harbor proxy + :params src: source of the file + :params dst: destination file path + """ + if not os.path.isfile(src): + return + print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst)) + shutil.copy2(src, dst) + mark_file(dst, mode=0o644) + map(lambda filename: add_additional_location_config( os.path.join(src_config_dir, filename), 
os.path.join(dst_config_dir, filename)), diff --git a/make/prepare b/make/prepare index 28d570c92..eada07a40 100755 --- a/make/prepare +++ b/make/prepare @@ -50,6 +50,7 @@ docker run --rm -v $input_dir:/input:z \ -v $harbor_prepare_path:/compose_location:z \ -v $config_dir:/config:z \ -v $secret_dir:/secret:z \ + -v /:/hostfs:z \ goharbor/prepare:dev $@ echo "Clean up the input dir" diff --git a/src/common/config/manager.go b/src/common/config/manager.go index 0df6eaa47..3886f160f 100644 --- a/src/common/config/manager.go +++ b/src/common/config/manager.go @@ -210,12 +210,14 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database { return &models.Database{ Type: c.Get(common.DatabaseType).GetString(), PostGreSQL: &models.PostGreSQL{ - Host: c.Get(common.PostGreSQLHOST).GetString(), - Port: c.Get(common.PostGreSQLPort).GetInt(), - Username: c.Get(common.PostGreSQLUsername).GetString(), - Password: c.Get(common.PostGreSQLPassword).GetString(), - Database: c.Get(common.PostGreSQLDatabase).GetString(), - SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(), + Host: c.Get(common.PostGreSQLHOST).GetString(), + Port: c.Get(common.PostGreSQLPort).GetInt(), + Username: c.Get(common.PostGreSQLUsername).GetString(), + Password: c.Get(common.PostGreSQLPassword).GetString(), + Database: c.Get(common.PostGreSQLDatabase).GetString(), + SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(), + MaxIdleConns: c.Get(common.PostGreSQLMaxIdleConns).GetInt(), + MaxOpenConns: c.Get(common.PostGreSQLMaxOpenConns).GetInt(), }, } } diff --git a/src/common/config/metadata/metadatalist.go b/src/common/config/metadata/metadatalist.go index 3aa42f619..7106a38c6 100644 --- a/src/common/config/metadata/metadatalist.go +++ b/src/common/config/metadata/metadatalist.go @@ -116,6 +116,8 @@ var ( {Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false}, {Name: common.PostGreSQLSSLMode, Scope: 
SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false}, {Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false}, + {Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false}, + {Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false}, {Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false}, {Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false}, @@ -151,6 +153,7 @@ var ( {Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true}, {Name: common.NotificationEnable, Scope: UserScope, Group: BasicGroup, EnvKey: "NOTIFICATION_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true}, + {Name: common.QuotaPerProjectEnable, Scope: UserScope, Group: QuotaGroup, EnvKey: "QUOTA_PER_PROJECT_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true}, {Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true}, {Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true}, } diff --git a/src/common/const.go b/src/common/const.go index f2778a48e..dbb8dec57 100755 --- 
a/src/common/const.go +++ b/src/common/const.go @@ -53,6 +53,8 @@ const ( PostGreSQLPassword = "postgresql_password" PostGreSQLDatabase = "postgresql_database" PostGreSQLSSLMode = "postgresql_sslmode" + PostGreSQLMaxIdleConns = "postgresql_max_idle_conns" + PostGreSQLMaxOpenConns = "postgresql_max_open_conns" SelfRegistration = "self_registration" CoreURL = "core_url" CoreLocalURL = "core_local_url" @@ -146,7 +148,9 @@ const ( // Global notification enable configuration NotificationEnable = "notification_enable" + // Quota setting items for project - CountPerProject = "count_per_project" - StoragePerProject = "storage_per_project" + QuotaPerProjectEnable = "quota_per_project_enable" + CountPerProject = "count_per_project" + StoragePerProject = "storage_per_project" ) diff --git a/src/common/dao/artifact.go b/src/common/dao/artifact.go index c66930876..bac77d74b 100644 --- a/src/common/dao/artifact.go +++ b/src/common/dao/artifact.go @@ -26,6 +26,8 @@ import ( func AddArtifact(af *models.Artifact) (int64, error) { now := time.Now() af.CreationTime = now + af.PushTime = now + id, err := GetOrmer().Insert(af) if err != nil { if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { @@ -36,6 +38,12 @@ func AddArtifact(af *models.Artifact) (int64, error) { return id, nil } +// UpdateArtifact ... +func UpdateArtifact(af *models.Artifact) error { + _, err := GetOrmer().Update(af) + return err +} + // UpdateArtifactDigest ... 
func UpdateArtifactDigest(af *models.Artifact) error { _, err := GetOrmer().Update(af, "digest") diff --git a/src/common/dao/base.go b/src/common/dao/base.go index 253b02692..43ded29ef 100644 --- a/src/common/dao/base.go +++ b/src/common/dao/base.go @@ -121,12 +121,16 @@ func getDatabase(database *models.Database) (db Database, err error) { switch database.Type { case "", "postgresql": - db = NewPGSQL(database.PostGreSQL.Host, + db = NewPGSQL( + database.PostGreSQL.Host, strconv.Itoa(database.PostGreSQL.Port), database.PostGreSQL.Username, database.PostGreSQL.Password, database.PostGreSQL.Database, - database.PostGreSQL.SSLMode) + database.PostGreSQL.SSLMode, + database.PostGreSQL.MaxIdleConns, + database.PostGreSQL.MaxOpenConns, + ) default: err = fmt.Errorf("invalid database: %s", database.Type) } @@ -139,6 +143,8 @@ var once sync.Once // GetOrmer :set ormer singleton func GetOrmer() orm.Ormer { once.Do(func() { + // override the default value(1000) to return all records when setting no limit + orm.DefaultRowsLimit = -1 globalOrm = orm.NewOrm() }) return globalOrm diff --git a/src/common/dao/blob.go b/src/common/dao/blob.go index 9a50bc3bd..ddcca42e1 100644 --- a/src/common/dao/blob.go +++ b/src/common/dao/blob.go @@ -2,11 +2,11 @@ package dao import ( "fmt" - "github.com/astaxie/beego/orm" - "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/utils/log" "strings" "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" ) // AddBlob ... @@ -23,6 +23,20 @@ func AddBlob(blob *models.Blob) (int64, error) { return id, nil } +// GetOrCreateBlob returns blob by digest, create it if not exists +func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) { + blob.CreationTime = time.Now() + + created, id, err := GetOrmer().ReadOrCreate(blob, "digest") + if err != nil { + return false, nil, err + } + + blob.ID = id + + return created, blob, nil +} + // GetBlob ... 
func GetBlob(digest string) (*models.Blob, error) { o := GetOrmer() @@ -50,15 +64,73 @@ func DeleteBlob(digest string) error { return err } -// HasBlobInProject ... -func HasBlobInProject(projectID int64, digest string) (bool, error) { - var res []orm.Params - num, err := GetOrmer().Raw(`SELECT * FROM artifact af LEFT JOIN artifact_blob afnb ON af.digest = afnb.digest_af WHERE af.project_id = ? and afnb.digest_blob = ? `, projectID, digest).Values(&res) - if err != nil { - return false, err +// GetBlobsByArtifact returns blobs of artifact +func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) { + sql := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)` + + var blobs []*models.Blob + if _, err := GetOrmer().Raw(sql, artifactDigest).QueryRows(&blobs); err != nil { + return nil, err } - if num == 0 { - return false, nil - } - return true, nil + + return blobs, nil +} + +// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project +func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) { + blobs, err := GetBlobsByArtifact(digest) + if err != nil { + return nil, err + } + + sql := fmt.Sprintf(` +SELECT + DISTINCT b.digest_blob AS digest +FROM + ( + SELECT + digest + FROM + artifact + WHERE + ( + project_id = ? + AND repo != ? + ) + OR ( + project_id = ? + AND digest != ? 
+ ) + ) AS a + LEFT JOIN artifact_blob b ON a.digest = b.digest_af + AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)-1)) + + params := []interface{}{projectID, repository, projectID, digest} + for _, blob := range blobs { + if blob.Digest != digest { + params = append(params, blob.Digest) + } + } + + var rows []struct { + Digest string + } + + if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil { + return nil, err + } + + shared := map[string]bool{} + for _, row := range rows { + shared[row.Digest] = true + } + + var exclusive []*models.Blob + for _, blob := range blobs { + if blob.Digest != digest && !shared[blob.Digest] { + exclusive = append(exclusive, blob) + } + } + + return exclusive, nil } diff --git a/src/common/dao/blob_test.go b/src/common/dao/blob_test.go index 9d5403563..26dc5e492 100644 --- a/src/common/dao/blob_test.go +++ b/src/common/dao/blob_test.go @@ -15,10 +15,15 @@ package dao import ( + "strings" + "testing" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" + "github.com/stretchr/testify/suite" ) func TestAddBlob(t *testing.T) { @@ -64,42 +69,154 @@ func TestDeleteBlob(t *testing.T) { require.Nil(t, err) } -func TestHasBlobInProject(t *testing.T) { - af := &models.Artifact{ - PID: 1, - Repo: "TestHasBlobInProject", - Tag: "latest", - Digest: "tttt", - Kind: "image", - } - - // add - _, err := AddArtifact(af) - require.Nil(t, err) - - afnb1 := &models.ArtifactAndBlob{ - DigestAF: "tttt", - DigestBlob: "zzza", - } - afnb2 := &models.ArtifactAndBlob{ - DigestAF: "tttt", - DigestBlob: "zzzb", - } - afnb3 := &models.ArtifactAndBlob{ - DigestAF: "tttt", - DigestBlob: "zzzc", +func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) { + digest := digest.FromString(strings.Join(layerDigests, 
":")).String() + artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag} + if _, err := AddArtifact(artifact); err != nil { + return "", err } var afnbs []*models.ArtifactAndBlob - afnbs = append(afnbs, afnb1) - afnbs = append(afnbs, afnb2) - afnbs = append(afnbs, afnb3) - // add - err = AddArtifactNBlobs(afnbs) - require.Nil(t, err) + blobDigests := append([]string{digest}, layerDigests...) + for _, blobDigest := range blobDigests { + blob := &models.Blob{Digest: blobDigest, Size: 1} + if _, _, err := GetOrCreateBlob(blob); err != nil { + return "", err + } - has, err := HasBlobInProject(1, "zzzb") - require.Nil(t, err) - assert.True(t, has) + afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest}) + } + + total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest}) + if err != nil { + return "", err + } + + if total == 1 { + if err := AddArtifactNBlobs(afnbs); err != nil { + return "", err + } + } + + return digest, nil +} + +func withProject(f func(int64, string)) { + projectName := utils.GenerateRandomString() + + projectID, err := AddProject(models.Project{ + Name: projectName, + OwnerID: 1, + }) + if err != nil { + panic(err) + } + + defer func() { + DeleteProject(projectID) + }() + + f(projectID, projectName) +} + +type GetExclusiveBlobsSuite struct { + suite.Suite +} + +func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string { + digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...) 
+ suite.Nil(err) + + return digest +} + +func (suite *GetExclusiveBlobsSuite) TestInSameRepository() { + withProject(func(projectID int64, projectName string) { + digest1 := digest.FromString(utils.GenerateRandomString()).String() + digest2 := digest.FromString(utils.GenerateRandomString()).String() + digest3 := digest.FromString(utils.GenerateRandomString()).String() + + manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + + manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 2) + } + + manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) { + suite.Len(blobs, 1) + suite.Equal(digest3, blobs[0].Digest) + } + }) +} + +func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + digest1 := digest.FromString(utils.GenerateRandomString()).String() + digest2 := digest.FromString(utils.GenerateRandomString()).String() + digest3 := digest.FromString(utils.GenerateRandomString()).String() + + manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + + manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", 
"latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) { + suite.Len(blobs, 0) + } + + manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) { + suite.Len(blobs, 1) + suite.Equal(digest3, blobs[0].Digest) + } + }) +} + +func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() { + withProject(func(projectID int64, projectName string) { + digest1 := digest.FromString(utils.GenerateRandomString()).String() + digest2 := digest.FromString(utils.GenerateRandomString()).String() + + manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + + withProject(func(id int64, name string) { + manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 2) + } + }) + + }) +} + +func TestRunGetExclusiveBlobsSuite(t *testing.T) { + suite.Run(t, new(GetExclusiveBlobsSuite)) } diff --git a/src/common/dao/pgsql.go b/src/common/dao/pgsql.go index e1b3da6cb..bf98c6b08 100644 --- a/src/common/dao/pgsql.go +++ b/src/common/dao/pgsql.go @@ -31,12 +31,14 @@ import ( const defaultMigrationPath = 
"migrations/postgresql/" type pgsql struct { - host string - port string - usr string - pwd string - database string - sslmode string + host string + port string + usr string + pwd string + database string + sslmode string + maxIdleConns int + maxOpenConns int } // Name returns the name of PostgreSQL @@ -51,17 +53,19 @@ func (p *pgsql) String() string { } // NewPGSQL returns an instance of postgres -func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string) Database { +func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database { if len(sslmode) == 0 { sslmode = "disable" } return &pgsql{ - host: host, - port: port, - usr: usr, - pwd: pwd, - database: database, - sslmode: sslmode, + host: host, + port: port, + usr: usr, + pwd: pwd, + database: database, + sslmode: sslmode, + maxIdleConns: maxIdleConns, + maxOpenConns: maxOpenConns, } } @@ -82,7 +86,7 @@ func (p *pgsql) Register(alias ...string) error { info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s", p.host, p.port, p.usr, p.pwd, p.database, p.sslmode) - return orm.RegisterDataBase(an, "postgres", info) + return orm.RegisterDataBase(an, "postgres", info, p.maxIdleConns, p.maxOpenConns) } // UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts. 
diff --git a/src/common/dao/pro_meta.go b/src/common/dao/pro_meta.go index d4a9c4e6f..a6593e2ef 100644 --- a/src/common/dao/pro_meta.go +++ b/src/common/dao/pro_meta.go @@ -44,7 +44,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error { params = append(params, projectID) if len(name) > 0 { - sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name))) + sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name))) params = append(params, name) } @@ -74,7 +74,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad params = append(params, projectID) if len(name) > 0 { - sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name))) + sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name))) params = append(params, name) } @@ -82,7 +82,9 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad return proMetas, err } -func paramPlaceholder(n int) string { +// ParamPlaceholderForIn returns a string that contains placeholders for sql keyword "in" +// e.g. n=3, returns "?,?,?" 
+func ParamPlaceholderForIn(n int) string { placeholders := []string{} for i := 0; i < n; i++ { placeholders = append(placeholders, "?") diff --git a/src/common/dao/project.go b/src/common/dao/project.go index b3066bcf1..e027ec221 100644 --- a/src/common/dao/project.go +++ b/src/common/dao/project.go @@ -167,9 +167,10 @@ func GetGroupProjects(groupIDs []int, query *models.ProjectQueryParam) ([]*model from project p left join project_member pm on p.project_id = pm.project_id left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' - where ug.id in ( %s ) order by name`, + where ug.id in ( %s )`, sql, groupIDCondition) } + sql = sql + ` order by name` sqlStr, queryParams := CreatePagination(query, sql, params) log.Debugf("query sql:%v", sql) var projects []*models.Project @@ -259,7 +260,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac } if len(query.ProjectIDs) > 0 { sql += fmt.Sprintf(` and p.project_id in ( %s )`, - paramPlaceholder(len(query.ProjectIDs))) + ParamPlaceholderForIn(len(query.ProjectIDs))) params = append(params, query.ProjectIDs) } return sql, params diff --git a/src/common/dao/project_blob.go b/src/common/dao/project_blob.go new file mode 100644 index 000000000..b6ade9938 --- /dev/null +++ b/src/common/dao/project_blob.go @@ -0,0 +1,122 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "fmt" + "time" + + "github.com/goharbor/harbor/src/common/models" +) + +// AddBlobToProject ... +func AddBlobToProject(blobID, projectID int64) (int64, error) { + pb := &models.ProjectBlob{ + BlobID: blobID, + ProjectID: projectID, + CreationTime: time.Now(), + } + + _, id, err := GetOrmer().ReadOrCreate(pb, "blob_id", "project_id") + return id, err +} + +// AddBlobsToProject ... +func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) { + if len(blobs) == 0 { + return 0, nil + } + + now := time.Now() + + var projectBlobs []*models.ProjectBlob + for _, blob := range blobs { + projectBlobs = append(projectBlobs, &models.ProjectBlob{ + BlobID: blob.ID, + ProjectID: projectID, + CreationTime: now, + }) + } + + return GetOrmer().InsertMulti(len(projectBlobs), projectBlobs) +} + +// RemoveBlobsFromProject ... +func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error { + var blobIDs []interface{} + for _, blob := range blobs { + blobIDs = append(blobIDs, blob.ID) + } + + if len(blobIDs) == 0 { + return nil + } + + sql := fmt.Sprintf(`DELETE FROM project_blob WHERE blob_id IN (%s)`, ParamPlaceholderForIn(len(blobIDs))) + + _, err := GetOrmer().Raw(sql, blobIDs).Exec() + return err +} + +// HasBlobInProject ... +func HasBlobInProject(projectID int64, digest string) (bool, error) { + sql := `SELECT COUNT(*) FROM project_blob JOIN blob ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?` + + var count int64 + if err := GetOrmer().Raw(sql, projectID, digest).QueryRow(&count); err != nil { + return false, err + } + + return count > 0, nil +} + +// GetBlobsNotInProject returns blobs not in project +func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blob, error) { + if len(blobDigests) == 0 { + return nil, nil + } + + sql := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) 
AND digest IN (%s)", + ParamPlaceholderForIn(len(blobDigests))) + + params := []interface{}{projectID} + for _, digest := range blobDigests { + params = append(params, digest) + } + + var blobs []*models.Blob + if _, err := GetOrmer().Raw(sql, params...).QueryRows(&blobs); err != nil { + return nil, err + } + + return blobs, nil +} + +// CountSizeOfProject ... +func CountSizeOfProject(pid int64) (int64, error) { + var blobs []models.Blob + + _, err := GetOrmer().Raw(`SELECT bb.id, bb.digest, bb.content_type, bb.size, bb.creation_time FROM project_blob pb LEFT JOIN blob bb ON pb.blob_id = bb.id WHERE pb.project_id = ? `, pid).QueryRows(&blobs) + if err != nil { + return 0, err + } + + var size int64 + for _, blob := range blobs { + size += blob.Size + } + + return size, err +} diff --git a/src/common/dao/project_blob_test.go b/src/common/dao/project_blob_test.go new file mode 100644 index 000000000..3d3643aee --- /dev/null +++ b/src/common/dao/project_blob_test.go @@ -0,0 +1,68 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHasBlobInProject(t *testing.T) { + _, blob, err := GetOrCreateBlob(&models.Blob{ + Digest: digest.FromString(utils.GenerateRandomString()).String(), + Size: 100, + }) + require.Nil(t, err) + + _, err = AddBlobToProject(blob.ID, 1) + require.Nil(t, err) + + has, err := HasBlobInProject(1, blob.Digest) + require.Nil(t, err) + assert.True(t, has) +} + +func TestCountSizeOfProject(t *testing.T) { + id1, err := AddBlob(&models.Blob{ + Digest: "CountSizeOfProject_blob1", + Size: 101, + }) + require.Nil(t, err) + + id2, err := AddBlob(&models.Blob{ + Digest: "CountSizeOfProject_blob2", + Size: 202, + }) + require.Nil(t, err) + + pid1, err := AddProject(models.Project{ + Name: "CountSizeOfProject_project1", + OwnerID: 1, + }) + require.Nil(t, err) + + _, err = AddBlobToProject(id1, pid1) + require.Nil(t, err) + _, err = AddBlobToProject(id2, pid1) + require.Nil(t, err) + + pSize, err := CountSizeOfProject(pid1) + assert.Equal(t, pSize, int64(303)) +} diff --git a/src/common/dao/quota.go b/src/common/dao/quota.go index 6cf130d3d..c86c53797 100644 --- a/src/common/dao/quota.go +++ b/src/common/dao/quota.go @@ -193,7 +193,7 @@ func quotaQueryConditions(query ...*models.QuotaQuery) (string, []interface{}) { } if len(q.ReferenceIDs) != 0 { - sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, paramPlaceholder(len(q.ReferenceIDs))) + sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs))) params = append(params, q.ReferenceIDs) } diff --git a/src/common/dao/quota_usage.go b/src/common/dao/quota_usage.go index 8e2f7ca48..d8b55db9b 100644 --- a/src/common/dao/quota_usage.go +++ b/src/common/dao/quota_usage.go @@ -111,7 +111,7 @@ func quotaUsageQueryConditions(query 
...*models.QuotaUsageQuery) (string, []inte params = append(params, q.ReferenceID) } if len(q.ReferenceIDs) != 0 { - sql += fmt.Sprintf(`and reference_id in (%s) `, paramPlaceholder(len(q.ReferenceIDs))) + sql += fmt.Sprintf(`and reference_id in (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs))) params = append(params, q.ReferenceIDs) } diff --git a/src/common/dao/repository.go b/src/common/dao/repository.go index c05a46899..abb859525 100644 --- a/src/common/dao/repository.go +++ b/src/common/dao/repository.go @@ -178,7 +178,7 @@ func repositoryQueryConditions(query ...*models.RepositoryQuery) (string, []inte if len(q.ProjectIDs) > 0 { sql += fmt.Sprintf(`and r.project_id in ( %s ) `, - paramPlaceholder(len(q.ProjectIDs))) + ParamPlaceholderForIn(len(q.ProjectIDs))) params = append(params, q.ProjectIDs) } diff --git a/src/common/models/base.go b/src/common/models/base.go index 3dcb5869c..de04d0285 100644 --- a/src/common/models/base.go +++ b/src/common/models/base.go @@ -40,6 +40,7 @@ func init() { new(NotificationPolicy), new(NotificationJob), new(Blob), + new(ProjectBlob), new(Artifact), new(ArtifactAndBlob), new(CVEWhitelist), diff --git a/src/common/models/config.go b/src/common/models/config.go index b8c7a0e6b..dfd13d4bb 100644 --- a/src/common/models/config.go +++ b/src/common/models/config.go @@ -45,12 +45,14 @@ type SQLite struct { // PostGreSQL ... type PostGreSQL struct { - Host string `json:"host"` - Port int `json:"port"` - Username string `json:"username"` - Password string `json:"password,omitempty"` - Database string `json:"database"` - SSLMode string `json:"sslmode"` + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + Database string `json:"database"` + SSLMode string `json:"sslmode"` + MaxIdleConns int `json:"max_idle_conns"` + MaxOpenConns int `json:"max_open_conns"` } // Email ... 
diff --git a/src/core/middlewares/util/reginteceptor.go b/src/common/models/project_blob.go similarity index 56% rename from src/core/middlewares/util/reginteceptor.go rename to src/common/models/project_blob.go index 902b66f0a..119dadbc0 100644 --- a/src/core/middlewares/util/reginteceptor.go +++ b/src/common/models/project_blob.go @@ -12,17 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -package util +package models import ( - "net/http" + "time" ) -// RegInterceptor ... -type RegInterceptor interface { - // HandleRequest ... - HandleRequest(req *http.Request) error - - // HandleResponse won't return any error - HandleResponse(rw CustomResponseWriter, req *http.Request) +// ProjectBlob holds the relationship between manifest and blob. +type ProjectBlob struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + ProjectID int64 `orm:"column(project_id)" json:"project_id"` + BlobID int64 `orm:"column(blob_id)" json:"blob_id"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` +} + +// TableName ... +func (*ProjectBlob) TableName() string { + return "project_blob" } diff --git a/src/common/quota/manager.go b/src/common/quota/manager.go index 43d70777b..9e477f680 100644 --- a/src/common/quota/manager.go +++ b/src/common/quota/manager.go @@ -176,16 +176,63 @@ func (m *Manager) DeleteQuota() error { // UpdateQuota update the quota resource spec func (m *Manager) UpdateQuota(hardLimits types.ResourceList) error { + o := dao.GetOrmer() if err := m.driver.Validate(hardLimits); err != nil { return err } sql := `UPDATE quota SET hard = ? WHERE reference = ? AND reference_id = ?` - _, err := dao.GetOrmer().Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec() + _, err := o.Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec() return err } +// EnsureQuota ensures the reference has quota and usage, +// if non-existent, will create new quota and usage. 
+// if existent, update the quota and usage. +func (m *Manager) EnsureQuota(usages types.ResourceList) error { + query := &models.QuotaQuery{ + Reference: m.reference, + ReferenceID: m.referenceID, + } + quotas, err := dao.ListQuotas(query) + if err != nil { + return err + } + + // non-existent: create quota and usage + defaultHardLimit := m.driver.HardLimits() + if len(quotas) == 0 { + _, err := m.NewQuota(defaultHardLimit, usages) + if err != nil { + return err + } + return nil + } + + // existent + used := usages + quotaUsed, err := types.NewResourceList(quotas[0].Used) + if types.Equals(quotaUsed, used) { + return nil + } + dao.WithTransaction(func(o orm.Ormer) error { + usage, err := m.getUsageForUpdate(o) + if err != nil { + return err + } + usage.Used = used.String() + usage.UpdateTime = time.Now() + _, err = o.Update(usage) + if err != nil { + return err + } + return nil + }) + + return nil +} + // AddResources add resources to usage func (m *Manager) AddResources(resources types.ResourceList) error { return dao.WithTransaction(func(o orm.Ormer) error { diff --git a/src/common/quota/manager_test.go b/src/common/quota/manager_test.go index 7de96d998..fde4b7d82 100644 --- a/src/common/quota/manager_test.go +++ b/src/common/quota/manager_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/quota/driver" "github.com/goharbor/harbor/src/common/quota/driver/mocks" "github.com/goharbor/harbor/src/pkg/types" @@ -131,6 +132,48 @@ func (suite *ManagerSuite) TestUpdateQuota() { } } +func (suite *ManagerSuite) TestEnsureQuota() { + // non-existent + nonExistRefID := "3" + mgr := suite.quotaManager(nonExistRefID) + infinite := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1} + usage := types.ResourceList{types.ResourceCount: 10, types.ResourceStorage: 10} + err := mgr.EnsureQuota(usage) + suite.Nil(err) + query := 
&models.QuotaQuery{ + Reference: reference, + ReferenceID: nonExistRefID, + } + quotas, err := dao.ListQuotas(query) + suite.Nil(err) + suite.Equal(usage, mustResourceList(quotas[0].Used)) + suite.Equal(infinite, mustResourceList(quotas[0].Hard)) + + // existent + existRefID := "4" + mgr = suite.quotaManager(existRefID) + used := types.ResourceList{types.ResourceCount: 11, types.ResourceStorage: 11} + if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) { + quota, _ := dao.GetQuota(id) + suite.Equal(hardLimits, mustResourceList(quota.Hard)) + + usage, _ := dao.GetQuotaUsage(id) + suite.Equal(used, mustResourceList(usage.Used)) + } + + usage2 := types.ResourceList{types.ResourceCount: 12, types.ResourceStorage: 12} + err = mgr.EnsureQuota(usage2) + suite.Nil(err) + query2 := &models.QuotaQuery{ + Reference: reference, + ReferenceID: existRefID, + } + quotas2, err := dao.ListQuotas(query2) + suite.Equal(usage2, mustResourceList(quotas2[0].Used)) + suite.Equal(hardLimits, mustResourceList(quotas2[0].Hard)) + +} + func (suite *ManagerSuite) TestQuotaAutoCreation() { for i := 0; i < 10; i++ { mgr := suite.quotaManager(fmt.Sprintf("%d", i)) diff --git a/src/common/utils/registry/repository.go b/src/common/utils/registry/repository.go index f7304499c..7a4a1c6c7 100644 --- a/src/common/utils/registry/repository.go +++ b/src/common/utils/registry/repository.go @@ -25,11 +25,9 @@ import ( "sort" "strconv" "strings" - // "time" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" - commonhttp "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/utils" ) @@ -407,6 +405,7 @@ func (r *Repository) monolithicBlobUpload(location, digest string, size int64, d if err != nil { return err } + req.ContentLength = size resp, err := r.client.Do(req) if err != nil { diff --git a/src/core/api/harborapi_test.go b/src/core/api/harborapi_test.go index 5357a6579..f76530f88 100644 --- 
a/src/core/api/harborapi_test.go +++ b/src/core/api/harborapi_test.go @@ -202,6 +202,8 @@ func init() { beego.Router("/api/quotas", quotaAPIType, "get:List") beego.Router("/api/quotas/:id([0-9]+)", quotaAPIType, "get:Get;put:Put") + beego.Router("/api/internal/switchquota", &InternalAPI{}, "put:SwitchQuota") + // syncRegistry if err := SyncRegistry(config.GlobalProjectMgr); err != nil { log.Fatalf("failed to sync repositories from registry: %v", err) diff --git a/src/core/api/internal.go b/src/core/api/internal.go index 71f1f317e..08fa377ed 100644 --- a/src/core/api/internal.go +++ b/src/core/api/internal.go @@ -15,12 +15,16 @@ package api import ( - "errors" - + "fmt" "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/pkg/errors" + "strconv" ) // InternalAPI handles request of harbor admin... @@ -69,3 +73,78 @@ func (ia *InternalAPI) RenameAdmin() { log.Debugf("The super user has been renamed to: %s", newName) ia.DestroySession() } + +// QuotaSwitcher ... +type QuotaSwitcher struct { + Enabled bool +} + +// SwitchQuota ... +func (ia *InternalAPI) SwitchQuota() { + var req QuotaSwitcher + if err := ia.DecodeJSONReq(&req); err != nil { + ia.SendBadRequestError(err) + return + } + // quota per project from disable to enable, it needs to update the quota usage bases on the DB records. 
+ if !config.QuotaPerProjectEnable() && req.Enabled { + if err := ia.ensureQuota(); err != nil { + ia.SendInternalServerError(err) + return + } + } + defer func() { + config.GetCfgManager().Set(common.QuotaPerProjectEnable, req.Enabled) + config.GetCfgManager().Save() + }() + return +} + +func (ia *InternalAPI) ensureQuota() error { + projects, err := dao.GetProjects(nil) + if err != nil { + return err + } + for _, project := range projects { + pSize, err := dao.CountSizeOfProject(project.ProjectID) + if err != nil { + logger.Warningf("error happen on counting size of project:%d , error:%v, just skip it.", project.ProjectID, err) + continue + } + afQuery := &models.ArtifactQuery{ + PID: project.ProjectID, + } + afs, err := dao.ListArtifacts(afQuery) + if err != nil { + logger.Warningf("error happen on counting number of project:%d , error:%v, just skip it.", project.ProjectID, err) + continue + } + pCount := int64(len(afs)) + + // it needs to append the chart count + if config.WithChartMuseum() { + count, err := chartController.GetCountOfCharts([]string{project.Name}) + if err != nil { + err = errors.Wrap(err, fmt.Sprintf("get chart count of project %d failed", project.ProjectID)) + logger.Error(err) + continue + } + pCount = pCount + int64(count) + } + + quotaMgr, err := quota.NewManager("project", strconv.FormatInt(project.ProjectID, 10)) + if err != nil { + logger.Errorf("Error occurred when to new quota manager %v, just skip it.", err) + continue + } + used := quota.ResourceList{ + quota.ResourceStorage: pSize, + quota.ResourceCount: pCount, + } + if err := quotaMgr.EnsureQuota(used); err != nil { + logger.Errorf("cannot ensure quota for the project: %d, err: %v, just skip it.", project.ProjectID, err) + continue + } + } + return nil +} diff --git a/src/core/api/internal_test.go b/src/core/api/internal_test.go new file mode 100644 index 000000000..87f4eb8b9 --- /dev/null +++ b/src/core/api/internal_test.go @@ -0,0 +1,56 @@ +// Copyright 2018 Project Harbor 
Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "net/http" + "testing" +) + +// cannot verify the real scenario here +func TestSwitchQuota(t *testing.T) { + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/internal/switchquota", + }, + code: http.StatusUnauthorized, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/internal/switchquota", + credential: sysAdmin, + bodyJSON: &QuotaSwitcher{ + Enabled: true, + }, + }, + code: http.StatusOK, + }, + // 403 + { + request: &testingRequest{ + url: "/api/internal/switchquota", + method: http.MethodPut, + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + } + runCodeCheckingCases(t, cases...) 
+} diff --git a/src/core/api/project.go b/src/core/api/project.go index 4a71dd316..1c98242ca 100644 --- a/src/core/api/project.go +++ b/src/core/api/project.go @@ -139,23 +139,26 @@ func (p *ProjectAPI) Post() { return } - setting, err := config.QuotaSetting() - if err != nil { - log.Errorf("failed to get quota setting: %v", err) - p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err)) - return - } + var hardLimits types.ResourceList + if config.QuotaPerProjectEnable() { + setting, err := config.QuotaSetting() + if err != nil { + log.Errorf("failed to get quota setting: %v", err) + p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err)) + return + } - if !p.SecurityCtx.IsSysAdmin() { - pro.CountLimit = &setting.CountPerProject - pro.StorageLimit = &setting.StoragePerProject - } + if !p.SecurityCtx.IsSysAdmin() { + pro.CountLimit = &setting.CountPerProject + pro.StorageLimit = &setting.StoragePerProject + } - hardLimits, err := projectQuotaHardLimits(pro, setting) - if err != nil { - log.Errorf("Invalid project request, error: %v", err) - p.SendBadRequestError(fmt.Errorf("invalid request: %v", err)) - return + hardLimits, err = projectQuotaHardLimits(pro, setting) + if err != nil { + log.Errorf("Invalid project request, error: %v", err) + p.SendBadRequestError(fmt.Errorf("invalid request: %v", err)) + return + } } exist, err := p.ProjectMgr.Exists(pro.Name) @@ -212,14 +215,16 @@ func (p *ProjectAPI) Post() { return } - quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10)) - if err != nil { - p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err)) - return - } - if _, err := quotaMgr.NewQuota(hardLimits); err != nil { - p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err)) - return + if config.QuotaPerProjectEnable() { + quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10)) + if err != nil { + 
p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err)) + return + } + if _, err := quotaMgr.NewQuota(hardLimits); err != nil { + p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err)) + return + } } go func() { @@ -653,6 +658,11 @@ func projectQuotaHardLimits(req *models.ProjectRequest, setting *models.QuotaSet } func getProjectQuotaSummary(projectID int64, summary *models.ProjectSummary) { + if !config.QuotaPerProjectEnable() { + log.Debug("Quota per project disabled") + return + } + quotas, err := dao.ListQuotas(&models.QuotaQuery{Reference: "project", ReferenceID: strconv.FormatInt(projectID, 10)}) if err != nil { log.Debugf("failed to get quota for project: %d", projectID) diff --git a/src/core/api/repository.go b/src/core/api/repository.go index 5da092b10..a77f02c34 100755 --- a/src/core/api/repository.go +++ b/src/core/api/repository.go @@ -595,11 +595,31 @@ func (ra *RepositoryAPI) GetTags() { tags = ts } + detail, err := ra.GetBool("detail", true) + if !detail && err == nil { + ra.Data["json"] = simpleTags(tags) + ra.ServeJSON() + return + } + ra.Data["json"] = assembleTagsInParallel(client, repoName, tags, ra.SecurityCtx.GetUsername()) ra.ServeJSON() } +func simpleTags(tags []string) []*models.TagResp { + var tagsResp []*models.TagResp + for _, tag := range tags { + tagsResp = append(tagsResp, &models.TagResp{ + TagDetail: models.TagDetail{ + Name: tag, + }, + }) + } + + return tagsResp +} + // get config, signature and scan overview and assemble them into one // struct for each tag in tags func assembleTagsInParallel(client *registry.Repository, repository string, diff --git a/src/core/api/retention.go b/src/core/api/retention.go index 7e9e113e7..1a14bfdaa 100644 --- a/src/core/api/retention.go +++ b/src/core/api/retention.go @@ -1,6 +1,7 @@ package api import ( + "encoding/json" "errors" "fmt" "net/http" @@ -41,30 +42,6 @@ func (r *RetentionAPI) GetMetadatas() { data := ` { "templates": [ - { - 
"rule_template": "lastXDays", - "display_text": "the images from the last # days", - "action": "retain", - "params": [ - { - "type": "int", - "unit": "DAYS", - "required": true - } - ] - }, - { - "rule_template": "latestActiveK", - "display_text": "the most recent active # images", - "action": "retain", - "params": [ - { - "type": "int", - "unit": "COUNT", - "required": true - } - ] - }, { "rule_template": "latestPushedK", "display_text": "the most recently pushed # images", @@ -90,25 +67,7 @@ func (r *RetentionAPI) GetMetadatas() { ] }, { - "rule_template": "nothing", - "display_text": "none", - "action": "retain", - "params": [] - }, - { - "rule_template": "always", - "display_text": "always", - "action": "retain", - "params": [ - { - "type": "int", - "unit": "COUNT", - "required": true - } - ] - }, - { - "rule_template": "dayspl", + "rule_template": "nDaysSinceLastPull", "display_text": "pulled within the last # days", "action": "retain", "params": [ @@ -120,7 +79,7 @@ func (r *RetentionAPI) GetMetadatas() { ] }, { - "rule_template": "daysps", + "rule_template": "nDaysSinceLastPush", "display_text": "pushed within the last # days", "action": "retain", "params": [ @@ -130,7 +89,19 @@ func (r *RetentionAPI) GetMetadatas() { "required": true } ] - } + }, + { + "rule_template": "nothing", + "display_text": "none", + "action": "retain", + "params": [] + }, + { + "rule_template": "always", + "display_text": "always", + "action": "retain", + "params": [] + } ], "scope_selectors": [ { @@ -194,6 +165,10 @@ func (r *RetentionAPI) CreateRetention() { r.SendBadRequestError(err) return } + if err = r.checkRuleConflict(p); err != nil { + r.SendConflictError(err) + return + } if !r.requireAccess(p, rbac.ActionCreate) { return } @@ -241,6 +216,10 @@ func (r *RetentionAPI) UpdateRetention() { return } p.ID = id + if err = r.checkRuleConflict(p); err != nil { + r.SendConflictError(err) + return + } if !r.requireAccess(p, rbac.ActionUpdate) { return } @@ -250,6 +229,21 @@ func (r 
*RetentionAPI) UpdateRetention() { } } +func (r *RetentionAPI) checkRuleConflict(p *policy.Metadata) error { + temp := make(map[string]int) + for n, rule := range p.Rules { + tid := rule.ID + rule.ID = 0 + bs, _ := json.Marshal(rule) + if old, exists := temp[string(bs)]; exists { + return fmt.Errorf("rule %d is conflict with rule %d", n, old) + } + temp[string(bs)] = tid + rule.ID = tid + } + return nil +} + // TriggerRetentionExec Trigger Retention Execution func (r *RetentionAPI) TriggerRetentionExec() { id, err := r.GetIDFromURL() diff --git a/src/core/api/retention_test.go b/src/core/api/retention_test.go index 0dee6fd13..0fe47f628 100644 --- a/src/core/api/retention_test.go +++ b/src/core/api/retention_test.go @@ -143,6 +143,87 @@ func TestCreatePolicy(t *testing.T) { }, code: http.StatusBadRequest, }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/retentions", + bodyJSON: &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + { + ID: 2, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: 
map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + }, + credential: sysAdmin, + }, + code: http.StatusConflict, + }, } runCodeCheckingCases(t, cases...) @@ -267,6 +348,87 @@ func TestPolicy(t *testing.T) { }, code: http.StatusOK, }, + { + request: &testingRequest{ + method: http.MethodPut, + url: fmt.Sprintf("/api/retentions/%d", id), + bodyJSON: &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "b.+", + }, + }, + }, + }, + { + ID: 2, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "b.+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + }, + credential: sysAdmin, + }, + code: http.StatusConflict, + }, { request: &testingRequest{ method: http.MethodPost, diff --git a/src/core/config/config.go b/src/core/config/config.go index 57c02bad1..b3808745d 100755 --- a/src/core/config/config.go +++ b/src/core/config/config.go @@ -331,12 +331,14 @@ func Database() (*models.Database, error) { database := &models.Database{} 
database.Type = cfgMgr.Get(common.DatabaseType).GetString() postgresql := &models.PostGreSQL{ - Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(), - Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(), - Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(), - Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(), - Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(), - SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(), + Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(), + Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(), + Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(), + Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(), + Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(), + SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(), + MaxIdleConns: cfgMgr.Get(common.PostGreSQLMaxIdleConns).GetInt(), + MaxOpenConns: cfgMgr.Get(common.PostGreSQLMaxOpenConns).GetInt(), } database.PostGreSQL = postgresql @@ -520,6 +522,11 @@ func NotificationEnable() bool { return cfgMgr.Get(common.NotificationEnable).GetBool() } +// QuotaPerProjectEnable returns a bool to indicates if quota per project enabled in harbor +func QuotaPerProjectEnable() bool { + return cfgMgr.Get(common.QuotaPerProjectEnable).GetBool() +} + // QuotaSetting returns the setting of quota. 
func QuotaSetting() (*models.QuotaSetting, error) { if err := cfgMgr.Load(); err != nil { diff --git a/src/core/middlewares/chart/builder.go b/src/core/middlewares/chart/builder.go index 56a4ce2c9..19827e2d3 100644 --- a/src/core/middlewares/chart/builder.go +++ b/src/core/middlewares/chart/builder.go @@ -15,12 +15,13 @@ package chart import ( + "fmt" "net/http" "regexp" "strconv" "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/middlewares/interceptor" "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota" "github.com/goharbor/harbor/src/core/middlewares/util" @@ -29,81 +30,82 @@ import ( var ( deleteChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P\w+)/charts/(?P\w+)/(?P[\w\d\.]+)/?$`) - uploadChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P\w+)/charts/?$`) + createChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P\w+)/charts/?$`) ) var ( defaultBuilders = []interceptor.Builder{ - &deleteChartVersionBuilder{}, - &uploadChartVersionBuilder{}, + &chartVersionDeletionBuilder{}, + &chartVersionCreationBuilder{}, } ) -type deleteChartVersionBuilder struct { -} +type chartVersionDeletionBuilder struct{} -func (*deleteChartVersionBuilder) Build(req *http.Request) interceptor.Interceptor { +func (*chartVersionDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { if req.Method != http.MethodDelete { - return nil + return nil, nil } matches := deleteChartVersionRe.FindStringSubmatch(req.URL.String()) if len(matches) <= 1 { - return nil + return nil, nil } namespace, chartName, version := matches[1], matches[2], matches[3] project, err := dao.GetProjectByName(namespace) if err != nil { - log.Errorf("Failed to get project %s, error: %v", namespace, err) - return nil + return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err) } if project == nil { - log.Warningf("Project 
%s not found", namespace) - return nil + return nil, fmt.Errorf("project %s not found", namespace) + } + + info := &util.ChartVersionInfo{ + ProjectID: project.ProjectID, + Namespace: namespace, + ChartName: chartName, + Version: version, } opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)), quota.WithAction(quota.SubtractAction), quota.StatusCode(http.StatusOK), - quota.MutexKeys(mutexKey(namespace, chartName, version)), + quota.MutexKeys(info.MutexKey()), quota.Resources(types.ResourceList{types.ResourceCount: 1}), } - return quota.New(opts...) + return quota.New(opts...), nil } -type uploadChartVersionBuilder struct { -} +type chartVersionCreationBuilder struct{} -func (*uploadChartVersionBuilder) Build(req *http.Request) interceptor.Interceptor { +func (*chartVersionCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { if req.Method != http.MethodPost { - return nil + return nil, nil } - matches := uploadChartVersionRe.FindStringSubmatch(req.URL.String()) + matches := createChartVersionRe.FindStringSubmatch(req.URL.String()) if len(matches) <= 1 { - return nil + return nil, nil } namespace := matches[1] project, err := dao.GetProjectByName(namespace) if err != nil { - log.Errorf("Failed to get project %s, error: %v", namespace, err) - return nil + return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err) } if project == nil { - log.Warningf("Project %s not found", namespace) - return nil + return nil, fmt.Errorf("project %s not found", namespace) } chart, err := parseChart(req) if err != nil { - log.Errorf("Failed to parse chart from body, error: %v", err) - return nil + return nil, fmt.Errorf("failed to parse chart from body, error: %v", err) } chartName, version := chart.Metadata.Name, chart.Metadata.Version @@ -117,12 +119,13 @@ func (*uploadChartVersionBuilder) Build(req *http.Request) interceptor.Intercept *req = 
*req.WithContext(util.NewChartVersionInfoContext(req.Context(), info)) opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)), quota.WithAction(quota.AddAction), quota.StatusCode(http.StatusCreated), - quota.MutexKeys(mutexKey(namespace, chartName, version)), - quota.OnResources(computeQuotaForUpload), + quota.MutexKeys(info.MutexKey()), + quota.OnResources(computeResourcesForChartVersionCreation), } - return quota.New(opts...) + return quota.New(opts...), nil } diff --git a/src/core/middlewares/chart/handler.go b/src/core/middlewares/chart/handler.go index edad44554..dd1fa583b 100644 --- a/src/core/middlewares/chart/handler.go +++ b/src/core/middlewares/chart/handler.go @@ -42,7 +42,13 @@ func New(next http.Handler, builders ...interceptor.Builder) http.Handler { // ServeHTTP manifest ... func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - interceptor := h.getInterceptor(req) + interceptor, err := h.getInterceptor(req) + if err != nil { + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in chart count quota handler: %v", err)), + http.StatusInternalServerError) + return + } + if interceptor == nil { h.next.ServeHTTP(rw, req) return @@ -61,13 +67,17 @@ func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { interceptor.HandleResponse(w, req) } -func (h *chartHandler) getInterceptor(req *http.Request) interceptor.Interceptor { +func (h *chartHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) { for _, builder := range h.builders { - interceptor := builder.Build(req) + interceptor, err := builder.Build(req) + if err != nil { + return nil, err + } + if interceptor != nil { - return interceptor + return interceptor, nil } } - return nil + return nil, nil } diff --git a/src/core/middlewares/chart/util.go b/src/core/middlewares/chart/util.go index 
768cf3831..03b899498 100644 --- a/src/core/middlewares/chart/util.go +++ b/src/core/middlewares/chart/util.go @@ -85,7 +85,9 @@ func chartVersionExists(namespace, chartName, version string) bool { return !chartVersion.Removed } -func computeQuotaForUpload(req *http.Request) (types.ResourceList, error) { +// computeResourcesForChartVersionCreation returns count resource required for the chart package +// no count required if the chart package of version exists in project +func computeResourcesForChartVersionCreation(req *http.Request) (types.ResourceList, error) { info, ok := util.ChartVersionInfoFromContext(req.Context()) if !ok { return nil, errors.New("chart version info missing") @@ -99,10 +101,6 @@ func computeQuotaForUpload(req *http.Request) (types.ResourceList, error) { return types.ResourceList{types.ResourceCount: 1}, nil } -func mutexKey(str ...string) string { - return "chart:" + strings.Join(str, ":") -} - func parseChart(req *http.Request) (*chart.Chart, error) { chartFile, _, err := req.FormFile(formFieldNameForChart) if err != nil { diff --git a/src/core/middlewares/countquota/builder.go b/src/core/middlewares/countquota/builder.go index fd507ce02..089c4a5d6 100644 --- a/src/core/middlewares/countquota/builder.go +++ b/src/core/middlewares/countquota/builder.go @@ -18,178 +18,83 @@ import ( "fmt" "net/http" "strconv" - "strings" - "time" "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/middlewares/interceptor" "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota" "github.com/goharbor/harbor/src/core/middlewares/util" - "github.com/opencontainers/go-digest" ) var ( defaultBuilders = []interceptor.Builder{ - &deleteManifestBuilder{}, - &putManifestBuilder{}, + &manifestDeletionBuilder{}, + &manifestCreationBuilder{}, } ) -type deleteManifestBuilder struct { -} 
+type manifestDeletionBuilder struct{} -func (*deleteManifestBuilder) Build(req *http.Request) interceptor.Interceptor { - if req.Method != http.MethodDelete { - return nil +func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchDeleteManifest(req); !match { + return nil, nil } - match, name, reference := util.MatchManifestURL(req) - if !match { - return nil - } + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + var err error + info, err = util.ParseManifestInfoFromPath(req) + if err != nil { + return nil, fmt.Errorf("failed to parse manifest, error %v", err) + } - dgt, err := digest.Parse(reference) - if err != nil { - // Delete manifest only accept digest as reference - return nil + // Manifest info will be used by computeResourcesForDeleteManifest + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) } - projectName := strings.Split(name, "/")[0] - project, err := dao.GetProjectByName(projectName) - if err != nil { - log.Errorf("Failed to get project %s, error: %v", projectName, err) - return nil - } - if project == nil { - log.Warningf("Project %s not found", projectName) - return nil - } - - info := &util.MfInfo{ - ProjectID: project.ProjectID, - Repository: name, - Digest: dgt.String(), - } - - // Manifest info will be used by computeQuotaForUpload - *req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info)) - opts := []quota.Option{ - quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)), + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), quota.WithAction(quota.SubtractAction), quota.StatusCode(http.StatusAccepted), - quota.MutexKeys(mutexKey(info)), - quota.OnResources(computeQuotaForDelete), + quota.MutexKeys(info.MutexKey("count")), + quota.OnResources(computeResourcesForManifestDeletion), quota.OnFulfilled(func(http.ResponseWriter, 
*http.Request) error { return dao.DeleteArtifactByDigest(info.ProjectID, info.Repository, info.Digest) }), } - return quota.New(opts...) + return quota.New(opts...), nil } -type putManifestBuilder struct { -} +type manifestCreationBuilder struct{} -func (b *putManifestBuilder) Build(req *http.Request) interceptor.Interceptor { - if req.Method != http.MethodPut { - return nil +func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchPushManifest(req); !match { + return nil, nil } info, ok := util.ManifestInfoFromContext(req.Context()) if !ok { - // assert that manifest info will be set by others - return nil + var err error + info, err = util.ParseManifestInfo(req) + if err != nil { + return nil, fmt.Errorf("failed to parse manifest, error %v", err) + } + + // Manifest info will be used by computeResourcesForCreateManifest + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) } opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), quota.WithAction(quota.AddAction), quota.StatusCode(http.StatusCreated), - quota.MutexKeys(mutexKey(info)), - quota.OnResources(computeQuotaForPut), - quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { - newManifest, overwriteTag := !info.Exist, info.DigestChanged - - if newManifest { - if err := b.doNewManifest(info); err != nil { - log.Errorf("Failed to handle response for new manifest, error: %v", err) - } - } else if overwriteTag { - if err := b.doOverwriteTag(info); err != nil { - log.Errorf("Failed to handle response for overwrite tag, error: %v", err) - } - } - - return nil - }), + quota.MutexKeys(info.MutexKey("count")), + quota.OnResources(computeResourcesForManifestCreation), + quota.OnFulfilled(afterManifestCreated), } - return quota.New(opts...) 
-} - -func (b *putManifestBuilder) doNewManifest(info *util.MfInfo) error { - artifact := &models.Artifact{ - PID: info.ProjectID, - Repo: info.Repository, - Tag: info.Tag, - Digest: info.Digest, - PushTime: time.Now(), - Kind: "Docker-Image", - } - - if _, err := dao.AddArtifact(artifact); err != nil { - return fmt.Errorf("error to add artifact, %v", err) - } - - return b.attachBlobsToArtifact(info) -} - -func (b *putManifestBuilder) doOverwriteTag(info *util.MfInfo) error { - artifact := &models.Artifact{ - ID: info.ArtifactID, - PID: info.ProjectID, - Repo: info.Repository, - Tag: info.Tag, - Digest: info.Digest, - PushTime: time.Now(), - Kind: "Docker-Image", - } - - if err := dao.UpdateArtifactDigest(artifact); err != nil { - return fmt.Errorf("error to update artifact, %v", err) - } - - return b.attachBlobsToArtifact(info) -} - -func (b *putManifestBuilder) attachBlobsToArtifact(info *util.MfInfo) error { - self := &models.ArtifactAndBlob{ - DigestAF: info.Digest, - DigestBlob: info.Digest, - } - - artifactBlobs := append([]*models.ArtifactAndBlob{}, self) - - for _, d := range info.Refrerence { - artifactBlob := &models.ArtifactAndBlob{ - DigestAF: info.Digest, - DigestBlob: d.Digest.String(), - } - - artifactBlobs = append(artifactBlobs, artifactBlob) - } - - if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil { - if strings.Contains(err.Error(), dao.ErrDupRows.Error()) { - log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag") - return nil - } - - return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err) - } - - return nil + return quota.New(opts...), nil } diff --git a/src/core/middlewares/countquota/handler.go b/src/core/middlewares/countquota/handler.go index 0537f4dc6..1b05a4cf5 100644 --- a/src/core/middlewares/countquota/handler.go +++ b/src/core/middlewares/countquota/handler.go @@ -42,7 +42,14 @@ func New(next http.Handler, builders ...interceptor.Builder) 
http.Handler { // ServeHTTP manifest ... func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - interceptor := h.getInterceptor(req) + interceptor, err := h.getInterceptor(req) + if err != nil { + log.Warningf("Error occurred when to handle request in count quota handler: %v", err) + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in count quota handler: %v", err)), + http.StatusInternalServerError) + return + } + if interceptor == nil { h.next.ServeHTTP(rw, req) return @@ -60,13 +67,17 @@ func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) interceptor.HandleResponse(rw, req) } -func (h *countQuotaHandler) getInterceptor(req *http.Request) interceptor.Interceptor { +func (h *countQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) { for _, builder := range h.builders { - interceptor := builder.Build(req) + interceptor, err := builder.Build(req) + if err != nil { + return nil, err + } + if interceptor != nil { - return interceptor + return interceptor, nil } } - return nil + return nil, nil } diff --git a/src/core/middlewares/countquota/handler_test.go b/src/core/middlewares/countquota/handler_test.go index 020a7da98..a2ebb5a69 100644 --- a/src/core/middlewares/countquota/handler_test.go +++ b/src/core/middlewares/countquota/handler_test.go @@ -26,6 +26,7 @@ import ( "github.com/docker/distribution" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/middlewares/util" "github.com/goharbor/harbor/src/pkg/types" "github.com/opencontainers/go-digest" @@ -67,7 +68,7 @@ func doDeleteManifestRequest(projectID int64, projectName, name, dgt string, nex url := fmt.Sprintf("/v2/%s/manifests/%s", repository, dgt) req, _ := http.NewRequest("DELETE", url, nil) - ctx := util.NewManifestInfoContext(req.Context(), 
&util.MfInfo{ + ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{ ProjectID: projectID, Repository: repository, Digest: dgt, @@ -96,12 +97,12 @@ func doPutManifestRequest(projectID int64, projectName, name, tag, dgt string, n url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag) req, _ := http.NewRequest("PUT", url, nil) - ctx := util.NewManifestInfoContext(req.Context(), &util.MfInfo{ + ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{ ProjectID: projectID, Repository: repository, Tag: tag, Digest: dgt, - Refrerence: []distribution.Descriptor{ + References: []distribution.Descriptor{ {Digest: digest.FromString(randomString(15))}, {Digest: digest.FromString(randomString(15))}, }, @@ -146,11 +147,13 @@ func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) { } func (suite *HandlerSuite) TearDownTest() { - dao.ClearTable("artifact") - dao.ClearTable("blob") - dao.ClearTable("artifact_blob") - dao.ClearTable("quota") - dao.ClearTable("quota_usage") + for _, table := range []string{ + "artifact", "blob", + "artifact_blob", "project_blob", + "quota", "quota_usage", + } { + dao.ClearTable(table) + } } func (suite *HandlerSuite) TestPutManifestCreated() { @@ -169,9 +172,6 @@ func (suite *HandlerSuite) TestPutManifestCreated() { total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt}) suite.Nil(err) suite.Equal(int64(1), total, "Artifact should be created") - if exists, err := dao.HasBlobInProject(projectID, dgt); suite.Nil(err) { - suite.True(exists) - } // Push the photon:latest with photon:dev code = doPutManifestRequest(projectID, projectName, "photon", "dev", dgt) @@ -213,9 +213,6 @@ func (suite *HandlerSuite) TestPutManifestFailed() { total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt}) suite.Nil(err) suite.Equal(int64(0), total, "Artifact should not be created") - if exists, err := dao.HasBlobInProject(projectID, dgt); suite.Nil(err) { - suite.False(exists) - } } func 
(suite *HandlerSuite) TestDeleteManifestAccepted() { @@ -258,7 +255,7 @@ func (suite *HandlerSuite) TestDeleteManifestFailed() { suite.checkCountUsage(1, projectID) } -func (suite *HandlerSuite) TestDeleteManifesInMultiProjects() { +func (suite *HandlerSuite) TestDeleteManifestInMultiProjects() { projectName := randomString(5) projectID := suite.addProject(projectName) @@ -294,6 +291,7 @@ func (suite *HandlerSuite) TestDeleteManifesInMultiProjects() { } func TestMain(m *testing.M) { + config.Init() dao.PrepareTestForPostgresSQL() if result := m.Run(); result != 0 { diff --git a/src/core/middlewares/countquota/util.go b/src/core/middlewares/countquota/util.go index 9f4fc011b..8275cb7ae 100644 --- a/src/core/middlewares/countquota/util.go +++ b/src/core/middlewares/countquota/util.go @@ -18,23 +18,35 @@ import ( "errors" "fmt" "net/http" + "strings" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/quota" + "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/middlewares/util" "github.com/goharbor/harbor/src/pkg/types" ) -func mutexKey(info *util.MfInfo) string { - if info.Tag != "" { - return "Quota::manifest-lock::" + info.Repository + ":" + info.Tag +// computeResourcesForManifestCreation returns count resource required for manifest +// no count required if the tag of the repository exists in the project +func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("manifest info missing") } - return "Quota::manifest-lock::" + info.Repository + ":" + info.Digest + // only count quota required when push new tag + if info.IsNewTag() { + return quota.ResourceList{quota.ResourceCount: 1}, nil + } + + return nil, nil } -func computeQuotaForDelete(req *http.Request) (types.ResourceList, error) { +// computeResourcesForManifestDeletion 
returns count resource will be released when manifest deleted +// then result will be the sum of manifest count of the same repository in the project +func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) { info, ok := util.ManifestInfoFromContext(req.Context()) if !ok { return nil, errors.New("manifest info missing") @@ -53,40 +65,54 @@ func computeQuotaForDelete(req *http.Request) (types.ResourceList, error) { return types.ResourceList{types.ResourceCount: total}, nil } -func computeQuotaForPut(req *http.Request) (types.ResourceList, error) { +// afterManifestCreated the handler after manifest created success +// it will create or update the artifact info in db, and then attach blobs to artifact +func afterManifestCreated(w http.ResponseWriter, req *http.Request) error { info, ok := util.ManifestInfoFromContext(req.Context()) if !ok { - return nil, errors.New("manifest info missing") + return errors.New("manifest info missing") } - artifact, err := getArtifact(info) - if err != nil { - return nil, fmt.Errorf("error occurred when to check Manifest existence %v", err) + artifact := info.Artifact() + if artifact.ID == 0 { + if _, err := dao.AddArtifact(artifact); err != nil { + return fmt.Errorf("error to add artifact, %v", err) + } + } else { + if err := dao.UpdateArtifact(artifact); err != nil { + return fmt.Errorf("error to update artifact, %v", err) + } } - if artifact != nil { - info.ArtifactID = artifact.ID - info.DigestChanged = artifact.Digest != info.Digest - info.Exist = true - - return nil, nil - } - - return quota.ResourceList{quota.ResourceCount: 1}, nil + return attachBlobsToArtifact(info) } -// get artifact by manifest info -func getArtifact(info *util.MfInfo) (*models.Artifact, error) { - query := &models.ArtifactQuery{ - PID: info.ProjectID, - Repo: info.Repository, - Tag: info.Tag, +// attachBlobsToArtifact attach the blobs which from manifest to artifact +func attachBlobsToArtifact(info *util.ManifestInfo) error { 
+ self := &models.ArtifactAndBlob{ + DigestAF: info.Digest, + DigestBlob: info.Digest, } - artifacts, err := dao.ListArtifacts(query) - if err != nil || len(artifacts) == 0 { - return nil, err + artifactBlobs := append([]*models.ArtifactAndBlob{}, self) + + for _, reference := range info.References { + artifactBlob := &models.ArtifactAndBlob{ + DigestAF: info.Digest, + DigestBlob: reference.Digest.String(), + } + + artifactBlobs = append(artifactBlobs, artifactBlob) } - return artifacts[0], nil + if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil { + if strings.Contains(err.Error(), dao.ErrDupRows.Error()) { + log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag") + return nil + } + + return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err) + } + + return nil } diff --git a/src/core/middlewares/interceptor/interceptor.go b/src/core/middlewares/interceptor/interceptor.go index ae4469c3f..ab8cf6ec6 100644 --- a/src/core/middlewares/interceptor/interceptor.go +++ b/src/core/middlewares/interceptor/interceptor.go @@ -20,8 +20,9 @@ import ( // Builder interceptor builder type Builder interface { - // Build build interceptor from http.Request returns nil if interceptor not match the request - Build(*http.Request) Interceptor + // Build build interceptor from http.Request + // (nil, nil) must be returned if builder not match the request + Build(*http.Request) (Interceptor, error) } // Interceptor interceptor for middleware @@ -32,3 +33,16 @@ type Interceptor interface { // HandleResponse won't return any error HandleResponse(http.ResponseWriter, *http.Request) } + +// ResponseInterceptorFunc ... +type ResponseInterceptorFunc func(w http.ResponseWriter, r *http.Request) + +// HandleRequest no-op HandleRequest +func (f ResponseInterceptorFunc) HandleRequest(*http.Request) error { + return nil +} + +// HandleResponse calls f(w, r). 
+func (f ResponseInterceptorFunc) HandleResponse(w http.ResponseWriter, r *http.Request) { + f(w, r) +} diff --git a/src/core/middlewares/interceptor/quota/options.go b/src/core/middlewares/interceptor/quota/options.go index ca43c4165..ddf102a74 100644 --- a/src/core/middlewares/interceptor/quota/options.go +++ b/src/core/middlewares/interceptor/quota/options.go @@ -36,6 +36,8 @@ const ( // Options ... type Options struct { + enforceResources *bool + Action Action Manager *quota.Manager MutexKeys []string @@ -48,6 +50,15 @@ type Options struct { OnFinally func(http.ResponseWriter, *http.Request) error } +// EnforceResources ... +func (opts *Options) EnforceResources() bool { + return opts.enforceResources != nil && *opts.enforceResources +} + +func boolPtr(v bool) *bool { + return &v +} + func newOptions(opt ...Option) Options { opts := Options{} @@ -63,9 +74,20 @@ func newOptions(opt ...Option) Options { opts.StatusCode = http.StatusOK } + if opts.enforceResources == nil { + opts.enforceResources = boolPtr(true) + } + return opts } +// EnforceResources sets the interceptor enforceResources +func EnforceResources(enforceResources bool) Option { + return func(o *Options) { + o.enforceResources = boolPtr(enforceResources) + } +} + // WithAction sets the interceptor action func WithAction(a Action) Option { return func(o *Options) { diff --git a/src/core/middlewares/interceptor/quota/quota.go b/src/core/middlewares/interceptor/quota/quota.go index bb8074b5d..2914af8ee 100644 --- a/src/core/middlewares/interceptor/quota/quota.go +++ b/src/core/middlewares/interceptor/quota/quota.go @@ -49,30 +49,19 @@ func (qi *quotaInterceptor) HandleRequest(req *http.Request) (err error) { } }() - opts := qi.opts - - for _, key := range opts.MutexKeys { - m, err := redis.RequireLock(key) - if err != nil { - return err - } - qi.mutexes = append(qi.mutexes, m) + err = qi.requireMutexes() + if err != nil { + return } - resources := opts.Resources - if len(resources) == 0 && 
opts.OnResources != nil { - resources, err = opts.OnResources(req) - if err != nil { - return fmt.Errorf("failed to compute the resources for quota, error: %v", err) - } - - log.Debugf("Compute the resources for quota, got: %v", resources) + err = qi.computeResources(req) + if err != nil { + return } - qi.resources = resources err = qi.reserve() if err != nil { - log.Errorf("Failed to %s resources, error: %v", opts.Action, err) + log.Errorf("Failed to %s resources, error: %v", qi.opts.Action, err) } return @@ -92,7 +81,9 @@ func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Requ switch sr.Status() { case opts.StatusCode: if opts.OnFulfilled != nil { - opts.OnFulfilled(w, req) + if err := opts.OnFulfilled(w, req); err != nil { + log.Errorf("Failed to handle on fulfilled, error: %v", err) + } } default: if err := qi.unreserve(); err != nil { @@ -100,15 +91,36 @@ func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Requ } if opts.OnRejected != nil { - opts.OnRejected(w, req) + if err := opts.OnRejected(w, req); err != nil { + log.Errorf("Failed to handle on rejected, error: %v", err) + } } } if opts.OnFinally != nil { - opts.OnFinally(w, req) + if err := opts.OnFinally(w, req); err != nil { + log.Errorf("Failed to handle on finally, error: %v", err) + } } } +func (qi *quotaInterceptor) requireMutexes() error { + if !qi.opts.EnforceResources() { + // Do nothing for locks when quota interceptor not enforce resources + return nil + } + + for _, key := range qi.opts.MutexKeys { + m, err := redis.RequireLock(key) + if err != nil { + return err + } + qi.mutexes = append(qi.mutexes, m) + } + + return nil +} + func (qi *quotaInterceptor) freeMutexes() { for i := len(qi.mutexes) - 1; i >= 0; i-- { if err := redis.FreeLock(qi.mutexes[i]); err != nil { @@ -117,8 +129,29 @@ func (qi *quotaInterceptor) freeMutexes() { } } +func (qi *quotaInterceptor) computeResources(req *http.Request) error { + if !qi.opts.EnforceResources() { + // 
Do nothing in compute resources when quota interceptor not enforce resources + return nil + } + + if len(qi.opts.Resources) == 0 && qi.opts.OnResources != nil { + resources, err := qi.opts.OnResources(req) + if err != nil { + return fmt.Errorf("failed to compute the resources for quota, error: %v", err) + } + + qi.resources = resources + } + + return nil +} + func (qi *quotaInterceptor) reserve() error { - log.Debugf("Reserve %s resources, %v", qi.opts.Action, qi.resources) + if !qi.opts.EnforceResources() { + // Do nothing in reserve resources when quota interceptor not enforce resources + return nil + } if len(qi.resources) == 0 { return nil @@ -135,7 +168,10 @@ func (qi *quotaInterceptor) reserve() error { } func (qi *quotaInterceptor) unreserve() error { - log.Debugf("Unreserve %s resources, %v", qi.opts.Action, qi.resources) + if !qi.opts.EnforceResources() { + // Do nothing in unreserve resources when quota interceptor not enforce resources + return nil + } if len(qi.resources) == 0 { return nil diff --git a/src/core/middlewares/sizequota/builder.go b/src/core/middlewares/sizequota/builder.go new file mode 100644 index 000000000..a6e1ecf92 --- /dev/null +++ b/src/core/middlewares/sizequota/builder.go @@ -0,0 +1,212 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sizequota + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota" + "github.com/goharbor/harbor/src/core/middlewares/util" +) + +var ( + defaultBuilders = []interceptor.Builder{ + &blobStreamUploadBuilder{}, + &blobStorageQuotaBuilder{}, + &manifestCreationBuilder{}, + &manifestDeletionBuilder{}, + } +) + +// blobStreamUploadBuilder interceptor for PATCH /v2//blobs/uploads/ +type blobStreamUploadBuilder struct{} + +func (*blobStreamUploadBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if !match(req, http.MethodPatch, blobUploadURLRe) { + return nil, nil + } + + s := blobUploadURLRe.FindStringSubmatch(req.URL.Path) + uuid := s[2] + + onResponse := func(w http.ResponseWriter, req *http.Request) { + size, err := parseUploadedBlobSize(w) + if err != nil { + log.Errorf("failed to parse uploaded blob size for upload %s", uuid) + return + } + + ok, err := setUploadedBlobSize(uuid, size) + if err != nil { + log.Errorf("failed to update blob update size for upload %s, error: %v", uuid, err) + return + } + + if !ok { + // ToDo discuss what to do here. 
+ log.Errorf("fail to set bunk: %s size: %d in redis, it causes unable to set correct quota for the artifact", uuid, size) + } + } + + return interceptor.ResponseInterceptorFunc(onResponse), nil +} + +// blobStorageQuotaBuilder interceptor builder for these requests +// PUT /v2//blobs/uploads/?digest= +// POST /v2//blobs/uploads/?mount=&from= +type blobStorageQuotaBuilder struct{} + +func (*blobStorageQuotaBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + parseBlobInfo := getBlobInfoParser(req) + if parseBlobInfo == nil { + return nil, nil + } + + info, err := parseBlobInfo(req) + if err != nil { + return nil, err + } + + // replace req with blob info context + *req = *(req.WithContext(util.NewBlobInfoContext(req.Context(), info))) + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + quota.WithAction(quota.AddAction), + quota.StatusCode(http.StatusCreated), // NOTICE: mount blob and blob upload complete both return 201 when success + quota.OnResources(computeResourcesForBlob), + quota.MutexKeys(info.MutexKey()), + quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { + return syncBlobInfoToProject(info) + }), + } + + return quota.New(opts...), nil +} + +// manifestCreationBuilder interceptor builder for the request PUT /v2//manifests/ +type manifestCreationBuilder struct{} + +func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchPushManifest(req); !match { + return nil, nil + } + + info, err := util.ParseManifestInfo(req) + if err != nil { + return nil, err + } + + // Replace request with manifests info context + *req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info)) + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + 
quota.WithAction(quota.AddAction), + quota.StatusCode(http.StatusCreated), + quota.OnResources(computeResourcesForManifestCreation), + quota.MutexKeys(info.MutexKey("size")), + quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { + // manifest created, sync manifest itself as blob to blob and project_blob table + blobInfo, err := parseBlobInfoFromManifest(req) + if err != nil { + return err + } + + if err := syncBlobInfoToProject(blobInfo); err != nil { + return err + } + + // sync blobs from manifest which are not in project to project_blob table + blobs, err := info.GetBlobsNotInProject() + if err != nil { + return err + } + + _, err = dao.AddBlobsToProject(info.ProjectID, blobs...) + + return err + }), + } + + return quota.New(opts...), nil +} + +// deleteManifestBuilder interceptor builder for the request DELETE /v2//manifests/ +type manifestDeletionBuilder struct{} + +func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchDeleteManifest(req); !match { + return nil, nil + } + + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + var err error + info, err = util.ParseManifestInfoFromPath(req) + if err != nil { + return nil, fmt.Errorf("failed to parse manifest, error %v", err) + } + + // Manifest info will be used by computeResourcesForDeleteManifest + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) + } + + blobs, err := dao.GetBlobsByArtifact(info.Digest) + if err != nil { + return nil, fmt.Errorf("failed to query blobs of %s, error: %v", info.Digest, err) + } + + mutexKeys := []string{info.MutexKey("size")} + for _, blob := range blobs { + mutexKeys = append(mutexKeys, info.BlobMutexKey(blob)) + } + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + quota.WithAction(quota.SubtractAction), + quota.StatusCode(http.StatusAccepted), + 
quota.OnResources(computeResourcesForManifestDeletion), + quota.MutexKeys(mutexKeys...), + quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { + blobs := info.ExclusiveBlobs + + total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{ + PID: info.ProjectID, + Digest: info.Digest, + }) + if err == nil && total > 0 { + blob, err := dao.GetBlob(info.Digest) + if err == nil { + blobs = append(blobs, blob) + } + } + + return dao.RemoveBlobsFromProject(info.ProjectID, blobs...) + }), + } + + return quota.New(opts...), nil +} diff --git a/src/core/middlewares/sizequota/handler.go b/src/core/middlewares/sizequota/handler.go index a94773a6c..244e55589 100644 --- a/src/core/middlewares/sizequota/handler.go +++ b/src/core/middlewares/sizequota/handler.go @@ -15,217 +15,69 @@ package sizequota import ( - "errors" "fmt" - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/quota" - common_util "github.com/goharbor/harbor/src/common/utils" - "github.com/goharbor/harbor/src/common/utils/log" - common_redis "github.com/goharbor/harbor/src/common/utils/redis" - "github.com/goharbor/harbor/src/core/middlewares/util" "net/http" - "strings" - "time" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/util" ) type sizeQuotaHandler struct { - next http.Handler + builders []interceptor.Builder + next http.Handler } // New ... -func New(next http.Handler) http.Handler { +func New(next http.Handler, builders ...interceptor.Builder) http.Handler { + if len(builders) == 0 { + builders = defaultBuilders + } + return &sizeQuotaHandler{ - next: next, + builders: builders, + next: next, } } // ServeHTTP ... 
-func (sqh *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - sizeInteceptor := getInteceptor(req) - if sizeInteceptor == nil { - sqh.next.ServeHTTP(rw, req) - return - } - - // handler request - if err := sizeInteceptor.HandleRequest(req); err != nil { +func (h *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + interceptor, err := h.getInterceptor(req) + if err != nil { log.Warningf("Error occurred when to handle request in size quota handler: %v", err) http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)), http.StatusInternalServerError) return } - sqh.next.ServeHTTP(rw, req) - // handler response - sizeInteceptor.HandleResponse(*rw.(*util.CustomResponseWriter), req) + if interceptor == nil { + h.next.ServeHTTP(rw, req) + return + } + + if err := interceptor.HandleRequest(req); err != nil { + log.Warningf("Error occurred when to handle request in size quota handler: %v", err) + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)), + http.StatusInternalServerError) + return + } + + h.next.ServeHTTP(rw, req) + + interceptor.HandleResponse(rw, req) } -func getInteceptor(req *http.Request) util.RegInterceptor { - // POST /v2//blobs/uploads/?mount=&from= - matchMountBlob, repository, mount, _ := util.MatchMountBlobURL(req) - if matchMountBlob { - bb := util.BlobInfo{} - bb.Repository = repository - bb.Digest = mount - return NewMountBlobInterceptor(&bb) - } - - // PUT /v2//blobs/uploads/?digest= - matchPutBlob, repository := util.MatchPutBlobURL(req) - if matchPutBlob { - bb := util.BlobInfo{} - bb.Repository = repository - return NewPutBlobInterceptor(&bb) - } - - // PUT /v2//manifests/ - matchPushMF, repository, tag := util.MatchPushManifest(req) - if matchPushMF { - bb := util.BlobInfo{} - mfInfo := util.MfInfo{} - bb.Repository = repository - 
mfInfo.Repository = repository - mfInfo.Tag = tag - return NewPutManifestInterceptor(&bb, &mfInfo) - } - - // PATCH /v2//blobs/uploads/ - matchPatchBlob, _ := util.MatchPatchBlobURL(req) - if matchPatchBlob { - return NewPatchBlobInterceptor() - } - - return nil -} - -func requireQuota(conn redis.Conn, blobInfo *util.BlobInfo) error { - projectID, err := util.GetProjectID(strings.Split(blobInfo.Repository, "/")[0]) - if err != nil { - return err - } - blobInfo.ProjectID = projectID - - digestLock, err := tryLockBlob(conn, blobInfo) - if err != nil { - log.Infof("failed to lock digest in redis, %v", err) - return err - } - blobInfo.DigestLock = digestLock - - blobExist, err := dao.HasBlobInProject(blobInfo.ProjectID, blobInfo.Digest) - if err != nil { - tryFreeBlob(blobInfo) - return err - } - blobInfo.Exist = blobExist - if blobExist { - return nil - } - - // only require quota for non existing blob. - quotaRes := "a.ResourceList{ - quota.ResourceStorage: blobInfo.Size, - } - err = util.TryRequireQuota(blobInfo.ProjectID, quotaRes) - if err != nil { - log.Infof("project id, %d, size %d", blobInfo.ProjectID, blobInfo.Size) - tryFreeBlob(blobInfo) - log.Errorf("cannot get quota for the blob %v", err) - return err - } - blobInfo.Quota = quotaRes - - return nil -} - -// HandleBlobCommon handles put blob complete request -// 1, add blob into DB if success -// 2, roll back resource if failure. 
-func HandleBlobCommon(rw util.CustomResponseWriter, req *http.Request) error { - bbInfo := req.Context().Value(util.BBInfokKey) - bb, ok := bbInfo.(*util.BlobInfo) - if !ok { - return errors.New("failed to convert blob information context into BBInfo") - } - defer func() { - _, err := bb.DigestLock.Free() +func (h *sizeQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) { + for _, builder := range h.builders { + interceptor, err := builder.Build(req) if err != nil { - log.Errorf("Error to unlock blob digest:%s in response handler, %v", bb.Digest, err) + return nil, err } - if err := bb.DigestLock.Conn.Close(); err != nil { - log.Errorf("Error to close redis connection in put blob response handler, %v", err) - } - }() - // Do nothing for a existing blob. - if bb.Exist { - return nil + if interceptor != nil { + return interceptor, nil + } } - if rw.Status() == http.StatusCreated { - blob := &models.Blob{ - Digest: bb.Digest, - ContentType: bb.ContentType, - Size: bb.Size, - CreationTime: time.Now(), - } - _, err := dao.AddBlob(blob) - if err != nil { - return err - } - } else if rw.Status() >= 300 && rw.Status() <= 511 { - success := util.TryFreeQuota(bb.ProjectID, bb.Quota) - if !success { - return fmt.Errorf("Error to release resource booked for the blob, %d, digest: %s ", bb.ProjectID, bb.Digest) - } - } - return nil -} - -// tryLockBlob locks blob with redis ... 
-func tryLockBlob(conn redis.Conn, blobInfo *util.BlobInfo) (*common_redis.Mutex, error) { - // Quota::blob-lock::projectname::digest - digestLock := common_redis.New(conn, "Quota::blob-lock::"+strings.Split(blobInfo.Repository, "/")[0]+":"+blobInfo.Digest, common_util.GenerateRandomString()) - success, err := digestLock.Require() - if err != nil { - return nil, err - } - if !success { - return nil, fmt.Errorf("unable to lock digest: %s, %s ", blobInfo.Repository, blobInfo.Digest) - } - return digestLock, nil -} - -func tryFreeBlob(blobInfo *util.BlobInfo) { - _, err := blobInfo.DigestLock.Free() - if err != nil { - log.Warningf("Error to unlock digest: %s,%s with error: %v ", blobInfo.Repository, blobInfo.Digest, err) - } -} - -func rmBlobUploadUUID(conn redis.Conn, UUID string) (bool, error) { - exists, err := redis.Int(conn.Do("EXISTS", UUID)) - if err != nil { - return false, err - } - if exists == 1 { - res, err := redis.Int(conn.Do("DEL", UUID)) - if err != nil { - return false, err - } - return res == 1, nil - } - return true, nil -} - -// put blob path: /v2//blobs/uploads/ -func getUUID(path string) string { - if !strings.Contains(path, "/") { - log.Infof("it's not a valid path string: %s", path) - return "" - } - strs := strings.Split(path, "/") - return strs[len(strs)-1] + return nil, nil } diff --git a/src/core/middlewares/sizequota/handler_test.go b/src/core/middlewares/sizequota/handler_test.go index b5231f16b..e2b2bb309 100644 --- a/src/core/middlewares/sizequota/handler_test.go +++ b/src/core/middlewares/sizequota/handler_test.go @@ -15,163 +15,696 @@ package sizequota import ( - "context" + "bytes" + "encoding/json" "fmt" - "github.com/garyburd/redigo/redis" - utilstest "github.com/goharbor/harbor/src/common/utils/test" - "github.com/goharbor/harbor/src/core/middlewares/util" - "github.com/stretchr/testify/assert" + "math/rand" "net/http" "net/http/httptest" "os" + "strconv" + "sync" "testing" "time" + + "github.com/docker/distribution" + 
"github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/countquota" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/suite" ) -const testingRedisHost = "REDIS_HOST" +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func genUUID() string { + b := make([]byte, 16) + + if _, err := rand.Read(b); err != nil { + return "" + } + + return fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +func getProjectCountUsage(projectID int64) (int64, error) { + usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)} + err := dao.GetOrmer().Read(&usage, "reference", "reference_id") + if err != nil { + return 0, err + } + used, err := types.NewResourceList(usage.Used) + if err != nil { + return 0, err + } + + return used[types.ResourceCount], nil +} + +func getProjectStorageUsage(projectID int64) (int64, error) { + usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)} + err := dao.GetOrmer().Read(&usage, "reference", "reference_id") + if err != nil { + return 0, err + } + used, err := types.NewResourceList(usage.Used) + if err != nil { + return 0, err + } + + return used[types.ResourceStorage], nil +} + +func randomString(n int) string { + const letterBytes = "abcdefghijklmnopqrstuvwxyz" + + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + + return string(b) +} + +func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest { + manifest := schema2.Manifest{ + Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: 
schema2.MediaTypeManifest}, + Config: distribution.Descriptor{ + MediaType: schema2.MediaTypeImageConfig, + Size: configSize, + Digest: digest.FromString(randomString(15)), + }, + } + + for _, size := range layerSizes { + manifest.Layers = append(manifest.Layers, distribution.Descriptor{ + MediaType: schema2.MediaTypeLayer, + Size: size, + Digest: digest.FromString(randomString(15)), + }) + } + + return manifest +} + +func manifestWithAdditionalLayers(raw schema2.Manifest, layerSizes []int64) schema2.Manifest { + var manifest schema2.Manifest + + manifest.Versioned = raw.Versioned + manifest.Config = raw.Config + manifest.Layers = append(manifest.Layers, raw.Layers...) + + for _, size := range layerSizes { + manifest.Layers = append(manifest.Layers, distribution.Descriptor{ + MediaType: schema2.MediaTypeLayer, + Size: size, + Digest: digest.FromString(randomString(15)), + }) + } + + return manifest +} + +func digestOfManifest(manifest schema2.Manifest) string { + bytes, _ := json.Marshal(manifest) + + return digest.FromBytes(bytes).String() +} + +func sizeOfManifest(manifest schema2.Manifest) int64 { + bytes, _ := json.Marshal(manifest) + + return int64(len(bytes)) +} + +func sizeOfImage(manifest schema2.Manifest) int64 { + totalSizeOfLayers := manifest.Config.Size + + for _, layer := range manifest.Layers { + totalSizeOfLayers += layer.Size + } + + return sizeOfManifest(manifest) + totalSizeOfLayers +} + +func doHandle(req *http.Request, next ...http.HandlerFunc) int { + rr := httptest.NewRecorder() + + var n http.HandlerFunc + if len(next) > 0 { + n = next[0] + } else { + n = func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + } + } + + h := New(http.HandlerFunc(n)) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req) + + return rr.Code +} + +func patchBlobUpload(projectName, name, uuid, blobDigest string, chunkSize int64) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := 
fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest) + req, _ := http.NewRequest(http.MethodPatch, url, nil) + + doHandle(req, func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Range", fmt.Sprintf("0-%d", chunkSize-1)) + }) +} + +func putBlobUpload(projectName, name, uuid, blobDigest string, blobSize ...int64) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest) + req, _ := http.NewRequest(http.MethodPut, url, nil) + if len(blobSize) > 0 { + req.Header.Add("Content-Length", strconv.FormatInt(blobSize[0], 10)) + } + + doHandle(req, func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + }) +} + +func mountBlob(projectName, name, blobDigest, fromRepository string) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/blobs/uploads/?mount=%s&from=%s", repository, blobDigest, fromRepository) + req, _ := http.NewRequest(http.MethodPost, url, nil) + + doHandle(req, func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + }) +} + +func deleteManifest(projectName, name, digest string, accepted ...func() bool) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/manifests/%s", repository, digest) + req, _ := http.NewRequest(http.MethodDelete, url, nil) + + next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if len(accepted) > 0 { + if accepted[0]() { + w.WriteHeader(http.StatusAccepted) + } else { + w.WriteHeader(http.StatusNotFound) + } + + return + } + + w.WriteHeader(http.StatusAccepted) + })) + + rr := httptest.NewRecorder() + h := New(next) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req) +} + +func putManifest(projectName, name, tag string, manifest schema2.Manifest) { + repository := fmt.Sprintf("%s/%s", projectName, name) 
+ + buf, _ := json.Marshal(manifest) + + url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag) + req, _ := http.NewRequest(http.MethodPut, url, bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + })) + + rr := httptest.NewRecorder() + h := New(next) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req) +} + +func pushImage(projectName, name, tag string, manifest schema2.Manifest) { + putBlobUpload(projectName, name, genUUID(), manifest.Config.Digest.String(), manifest.Config.Size) + for _, layer := range manifest.Layers { + putBlobUpload(projectName, name, genUUID(), layer.Digest.String(), layer.Size) + } + + putManifest(projectName, name, tag, manifest) +} + +func withProject(f func(int64, string)) { + projectName := randomString(5) + + projectID, err := dao.AddProject(models.Project{ + Name: projectName, + OwnerID: 1, + }) + if err != nil { + panic(err) + } + + defer func() { + dao.DeleteProject(projectID) + }() + + f(projectID, projectName) +} + +type HandlerSuite struct { + suite.Suite +} + +func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) { + count, err := getProjectCountUsage(projectID) + suite.Nil(err, fmt.Sprintf("Failed to get count usage of project %d, error: %v", projectID, err)) + suite.Equal(expected, count, "Failed to check count usage for project %d", projectID) +} + +func (suite *HandlerSuite) checkStorageUsage(expected, projectID int64) { + value, err := getProjectStorageUsage(projectID) + suite.Nil(err, fmt.Sprintf("Failed to get storage usage of project %d, error: %v", projectID, err)) + suite.Equal(expected, value, "Failed to check storage usage for project %d", projectID) +} + +func (suite *HandlerSuite) TearDownTest() { + for _, table := range []string{ + "artifact", "blob", + "artifact_blob", "project_blob", + "quota", "quota_usage", + } { + dao.ClearTable(table) + } 
+} + +func (suite *HandlerSuite) TestPatchBlobUpload() { + withProject(func(projectID int64, projectName string) { + uuid := genUUID() + blobDigest := digest.FromString(randomString(15)).String() + patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024) + size, err := getUploadedBlobSize(uuid) + suite.Nil(err) + suite.Equal(int64(1024), size) + }) +} + +func (suite *HandlerSuite) TestPutBlobUpload() { + withProject(func(projectID int64, projectName string) { + uuid := genUUID() + blobDigest := digest.FromString(randomString(15)).String() + putBlobUpload(projectName, "photon", uuid, blobDigest, 1024) + suite.checkStorageUsage(1024, projectID) + + blob, err := dao.GetBlob(blobDigest) + suite.Nil(err) + suite.Equal(int64(1024), blob.Size) + }) +} + +func (suite *HandlerSuite) TestPutBlobUploadWithPatch() { + withProject(func(projectID int64, projectName string) { + uuid := genUUID() + blobDigest := digest.FromString(randomString(15)).String() + patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024) + + putBlobUpload(projectName, "photon", uuid, blobDigest) + suite.checkStorageUsage(1024, projectID) + + blob, err := dao.GetBlob(blobDigest) + suite.Nil(err) + suite.Equal(int64(1024), blob.Size) + }) +} + +func (suite *HandlerSuite) TestMountBlob() { + withProject(func(projectID int64, projectName string) { + blobDigest := digest.FromString(randomString(15)).String() + putBlobUpload(projectName, "photon", genUUID(), blobDigest, 1024) + suite.checkStorageUsage(1024, projectID) + + repository := fmt.Sprintf("%s/%s", projectName, "photon") + + withProject(func(projectID int64, projectName string) { + mountBlob(projectName, "harbor", blobDigest, repository) + suite.checkStorageUsage(1024, projectID) + }) + }) +} + +func (suite *HandlerSuite) TestPutManifestCreated() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(100, []int64{100, 100}) + + putBlobUpload(projectName, "photon", genUUID(), manifest.Config.Digest.String(), 
manifest.Config.Size) + for _, layer := range manifest.Layers { + putBlobUpload(projectName, "photon", genUUID(), layer.Digest.String(), layer.Size) + } + + putManifest(projectName, "photon", "latest", manifest) + + suite.checkStorageUsage(int64(300+sizeOfManifest(manifest)), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifest() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + deleteManifest(projectName, "photon", digestOfManifest(manifest)) + suite.checkStorageUsage(0, projectID) + }) +} + +func (suite *HandlerSuite) TestImageOverwrite() { + withProject(func(projectID int64, projectName string) { + manifest1 := makeManifest(1, []int64{2, 3, 4, 5}) + size1 := sizeOfImage(manifest1) + pushImage(projectName, "photon", "latest", manifest1) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1, projectID) + + manifest2 := makeManifest(1, []int64{2, 3, 4, 5}) + size2 := sizeOfImage(manifest2) + pushImage(projectName, "photon", "latest", manifest2) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1+size2, projectID) + + manifest3 := makeManifest(1, []int64{2, 3, 4, 5}) + size3 := sizeOfImage(manifest2) + pushImage(projectName, "photon", "latest", manifest3) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1+size2+size3, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageMultiTimes() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + 
pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageToSameRepository() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "photon", "dev", manifest) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageToDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "redis", "latest", manifest) + suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID) + + pushImage(projectName, "postgres", "latest", manifest) + suite.checkStorageUsage(size+2*sizeOfManifest(manifest), projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageToDifferentProjects() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + withProject(func(id int64, name string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(name, "mysql", "latest", manifest) + suite.checkStorageUsage(size, id) + + suite.checkStorageUsage(size, projectID) + }) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestShareLayersInSameRepository() { + withProject(func(projectID int64, projectName string) { + manifest1 := makeManifest(1, []int64{2, 3, 4, 5}) + size1 := 
sizeOfImage(manifest1) + + pushImage(projectName, "mysql", "latest", manifest1) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1, projectID) + + manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7}) + pushImage(projectName, "mysql", "dev", manifest2) + suite.checkCountUsage(2, projectID) + + totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7 + suite.checkStorageUsage(totalSize, projectID) + + deleteManifest(projectName, "mysql", digestOfManifest(manifest1)) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestShareLayersInDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + manifest1 := makeManifest(1, []int64{2, 3, 4, 5}) + size1 := sizeOfImage(manifest1) + + pushImage(projectName, "mysql", "latest", manifest1) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1, projectID) + + pushImage(projectName, "mysql", "dev", manifest1) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size1, projectID) + + manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7}) + pushImage(projectName, "mariadb", "latest", manifest2) + suite.checkCountUsage(3, projectID) + + totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7 + suite.checkStorageUsage(totalSize, projectID) + + deleteManifest(projectName, "mysql", digestOfManifest(manifest1)) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestInSameRepository() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "photon", "dev", manifest) + 
suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + + deleteManifest(projectName, "photon", digestOfManifest(manifest)) + suite.checkCountUsage(0, projectID) + suite.checkStorageUsage(0, projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestInDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "mysql", "5.6", manifest) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "redis", "latest", manifest) + suite.checkCountUsage(3, projectID) + suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID) + + deleteManifest(projectName, "redis", digestOfManifest(manifest)) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "redis", "latest", manifest) + suite.checkCountUsage(3, projectID) + suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestInDifferentProjects() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + withProject(func(id int64, name string) { + pushImage(name, "mysql", "latest", manifest) + suite.checkStorageUsage(size, id) + + suite.checkStorageUsage(size, projectID) + deleteManifest(projectName, "mysql", digestOfManifest(manifest)) + suite.checkCountUsage(0, projectID) + suite.checkStorageUsage(0, projectID) + }) + + }) +} + +func (suite *HandlerSuite) TestPushDeletePush() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, 
[]int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + deleteManifest(projectName, "photon", digestOfManifest(manifest)) + suite.checkStorageUsage(0, projectID) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageRace() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + pushImage(projectName, "photon", "latest", manifest) + }() + } + wg.Wait() + + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteImageRace() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "photon", "latest", manifest) + + count := 100 + size := sizeOfImage(manifest) + for i := 0; i < count; i++ { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "mysql", fmt.Sprintf("tag%d", i), manifest) + size += sizeOfImage(manifest) + } + + suite.checkCountUsage(int64(count+1), projectID) + suite.checkStorageUsage(size, projectID) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + deleteManifest(projectName, "photon", digestOfManifest(manifest), func() bool { + return i == 0 + }) + }(i) + } + wg.Wait() + + suite.checkCountUsage(int64(count), projectID) + suite.checkStorageUsage(size-sizeOfImage(manifest), projectID) + }) +} + +func (suite *HandlerSuite) TestDisableProjectQuota() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "photon", "latest", manifest) + + quotas, err := 
dao.ListQuotas(&models.QuotaQuery{ + Reference: "project", + ReferenceID: strconv.FormatInt(projectID, 10), + }) + + suite.Nil(err) + suite.Len(quotas, 1) + }) + + withProject(func(projectID int64, projectName string) { + cfg := config.GetCfgManager() + cfg.Set(common.QuotaPerProjectEnable, false) + defer cfg.Set(common.QuotaPerProjectEnable, true) + + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "photon", "latest", manifest) + + quotas, err := dao.ListQuotas(&models.QuotaQuery{ + Reference: "project", + ReferenceID: strconv.FormatInt(projectID, 10), + }) + + suite.Nil(err) + suite.Len(quotas, 0) + }) +} func TestMain(m *testing.M) { - utilstest.InitDatabaseFromEnv() - rc := m.Run() - if rc != 0 { - os.Exit(rc) + config.Init() + dao.PrepareTestForPostgresSQL() + + if result := m.Run(); result != 0 { + os.Exit(result) } } -func TestGetInteceptor(t *testing.T) { - assert := assert.New(t) - req1, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - res1 := getInteceptor(req1) - - _, ok := res1.(*PutManifestInterceptor) - assert.True(ok) - - req2, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/TestGetInteceptor/14.04", nil) - res2 := getInteceptor(req2) - assert.Nil(res2) - -} - -func TestRequireQuota(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - assert := assert.New(t) - blobInfo := &util.BlobInfo{ - Repository: "library/test", - Digest: "sha256:abcdf123sdfefeg1246", - } - - err = requireQuota(con, blobInfo) - assert.Nil(err) - -} - -func TestGetUUID(t *testing.T) { - str1 := "test/1/2/uuid-1" - uuid1 := getUUID(str1) - assert.Equal(t, uuid1, "uuid-1") - - // not a valid path, just return empty - str2 := "test-1-2-uuid-2" - uuid2 := 
getUUID(str2) - assert.Equal(t, uuid2, "") -} - -func TestAddRmUUID(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - rmfail, err := rmBlobUploadUUID(con, "test-rm-uuid") - assert.Nil(t, err) - assert.True(t, rmfail) - - success, err := util.SetBunkSize(con, "test-rm-uuid", 1000) - assert.Nil(t, err) - assert.True(t, success) - - rmSuccess, err := rmBlobUploadUUID(con, "test-rm-uuid") - assert.Nil(t, err) - assert.True(t, rmSuccess) - -} - -func TestTryFreeLockBlob(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - blobInfo := util.BlobInfo{ - Repository: "lock/test", - Digest: "sha256:abcdf123sdfefeg1246", - } - - lock, err := tryLockBlob(con, &blobInfo) - assert.Nil(t, err) - blobInfo.DigestLock = lock - tryFreeBlob(&blobInfo) -} - -func TestBlobCommon(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - blobInfo := util.BlobInfo{ - Repository: "TestBlobCommon/test", - Digest: "sha256:abcdf12345678sdfefeg1246", - ContentType: "ContentType", - Size: 101, - Exist: false, - } - - rw := httptest.NewRecorder() - customResW := util.CustomResponseWriter{ResponseWriter: rw} - customResW.WriteHeader(201) - - lock, err := tryLockBlob(con, &blobInfo) - assert.Nil(t, err) - 
blobInfo.DigestLock = lock - - *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo))) - - err = HandleBlobCommon(customResW, req) - assert.Nil(t, err) - -} - -func getRedisHost() string { - redisHost := os.Getenv(testingRedisHost) - if redisHost == "" { - redisHost = "127.0.0.1" // for local test - } - - return redisHost +func TestRunHandlerSuite(t *testing.T) { + suite.Run(t, new(HandlerSuite)) } diff --git a/src/core/middlewares/sizequota/mountblob.go b/src/core/middlewares/sizequota/mountblob.go deleted file mode 100644 index 8eba2ee3b..000000000 --- a/src/core/middlewares/sizequota/mountblob.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sizequota - -import ( - "context" - "fmt" - "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/utils/log" - "github.com/goharbor/harbor/src/core/middlewares/util" - "net/http" - "strings" -) - -// MountBlobInterceptor ... -type MountBlobInterceptor struct { - blobInfo *util.BlobInfo -} - -// NewMountBlobInterceptor ... -func NewMountBlobInterceptor(blobInfo *util.BlobInfo) *MountBlobInterceptor { - return &MountBlobInterceptor{ - blobInfo: blobInfo, - } -} - -// HandleRequest ... 
-func (mbi *MountBlobInterceptor) HandleRequest(req *http.Request) error { - tProjectID, err := util.GetProjectID(strings.Split(mbi.blobInfo.Repository, "/")[0]) - if err != nil { - return fmt.Errorf("error occurred when to get target project: %d, %v", tProjectID, err) - } - blob, err := dao.GetBlob(mbi.blobInfo.Digest) - if err != nil { - return err - } - if blob == nil { - return fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", mbi.blobInfo.Digest) - } - mbi.blobInfo.Size = blob.Size - con, err := util.GetRegRedisCon() - if err != nil { - return err - } - if err := requireQuota(con, mbi.blobInfo); err != nil { - return err - } - *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, mbi.blobInfo))) - return nil -} - -// HandleResponse ... -func (mbi *MountBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) { - if err := HandleBlobCommon(rw, req); err != nil { - log.Error(err) - } -} diff --git a/src/core/middlewares/sizequota/mountblob_test.go b/src/core/middlewares/sizequota/mountblob_test.go deleted file mode 100644 index 7d6c07cbc..000000000 --- a/src/core/middlewares/sizequota/mountblob_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sizequota - -import ( - "context" - "fmt" - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/core/middlewares/util" - "github.com/stretchr/testify/assert" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -func TestNewMountBlobInterceptor(t *testing.T) { - blobinfo := &util.BlobInfo{} - blobinfo.Repository = "TestNewMountBlobInterceptor/latest" - - bi := NewMountBlobInterceptor(blobinfo) - assert.NotNil(t, bi) -} - -func TestMountBlobHandleRequest(t *testing.T) { - blobInfo := util.BlobInfo{ - Repository: "TestHandleRequest/test", - Digest: "sha256:TestHandleRequest1234", - ContentType: "ContentType", - Size: 101, - Exist: false, - } - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - bi := NewMountBlobInterceptor(&blobInfo) - assert.NotNil(t, bi.HandleRequest(req)) -} - -func TestMountBlobHandleResponse(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - blobInfo := util.BlobInfo{ - Repository: "TestHandleResponse/test", - Digest: "sha256:TestHandleResponseabcdf12345678sdfefeg1246", - ContentType: "ContentType", - Size: 101, - Exist: false, - } - - rw := httptest.NewRecorder() - customResW := util.CustomResponseWriter{ResponseWriter: rw} - customResW.WriteHeader(201) - - lock, err := tryLockBlob(con, &blobInfo) - assert.Nil(t, err) - blobInfo.DigestLock = lock - - *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo))) - - bi := NewMountBlobInterceptor(&blobInfo) - assert.NotNil(t, bi) - - bi.HandleResponse(customResW, req) - -} diff --git a/src/core/middlewares/sizequota/patchblob.go 
b/src/core/middlewares/sizequota/patchblob.go deleted file mode 100644 index c5ce15d63..000000000 --- a/src/core/middlewares/sizequota/patchblob.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sizequota - -import ( - "github.com/goharbor/harbor/src/common/utils/log" - "github.com/goharbor/harbor/src/core/middlewares/util" - "net/http" - "strconv" - "strings" -) - -// PatchBlobInterceptor ... -type PatchBlobInterceptor struct { -} - -// NewPatchBlobInterceptor ... -func NewPatchBlobInterceptor() *PatchBlobInterceptor { - return &PatchBlobInterceptor{} -} - -// HandleRequest do nothing for patch blob, just let the request to proxy. -func (pbi *PatchBlobInterceptor) HandleRequest(req *http.Request) error { - return nil -} - -// HandleResponse record the upload process with Range attribute, set it into redis with UUID as the key -func (pbi *PatchBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) { - if rw.Status() != http.StatusAccepted { - return - } - - con, err := util.GetRegRedisCon() - if err != nil { - log.Error(err) - return - } - defer con.Close() - - uuid := rw.Header().Get("Docker-Upload-UUID") - if uuid == "" { - log.Errorf("no UUID in the patch blob response, the request path %s ", req.URL.Path) - return - } - - // Range: Range indicating the current progress of the upload. 
- // https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload - patchRange := rw.Header().Get("Range") - if uuid == "" { - log.Errorf("no Range in the patch blob response, the request path %s ", req.URL.Path) - return - } - - endRange := strings.Split(patchRange, "-")[1] - size, err := strconv.ParseInt(endRange, 10, 64) - // docker registry did '-1' in the response - if size > 0 { - size = size + 1 - } - if err != nil { - log.Error(err) - return - } - success, err := util.SetBunkSize(con, uuid, size) - if err != nil { - log.Error(err) - return - } - if !success { - // ToDo discuss what to do here. - log.Warningf(" T_T: Fail to set bunk: %s size: %d in redis, it causes unable to set correct quota for the artifact.", uuid, size) - } - return -} diff --git a/src/core/middlewares/sizequota/patchblob_test.go b/src/core/middlewares/sizequota/patchblob_test.go deleted file mode 100644 index 843b505c1..000000000 --- a/src/core/middlewares/sizequota/patchblob_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sizequota - -import ( - "github.com/goharbor/harbor/src/core/middlewares/util" - "github.com/stretchr/testify/assert" - "net/http" - "net/http/httptest" - "testing" -) - -func TestNewPatchBlobInterceptor(t *testing.T) { - bi := NewPatchBlobInterceptor() - assert.NotNil(t, bi) -} - -func TestPatchBlobHandleRequest(t *testing.T) { - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - bi := NewPatchBlobInterceptor() - assert.Nil(t, bi.HandleRequest(req)) -} - -func TestPatchBlobHandleResponse(t *testing.T) { - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - rw := httptest.NewRecorder() - customResW := util.CustomResponseWriter{ResponseWriter: rw} - customResW.WriteHeader(400) - NewPatchBlobInterceptor().HandleResponse(customResW, req) -} diff --git a/src/core/middlewares/sizequota/putblob.go b/src/core/middlewares/sizequota/putblob.go deleted file mode 100644 index e2e75b8b3..000000000 --- a/src/core/middlewares/sizequota/putblob.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sizequota - -import ( - "context" - "errors" - "github.com/goharbor/harbor/src/common/utils/log" - "github.com/goharbor/harbor/src/core/middlewares/util" - "github.com/opencontainers/go-digest" - "net/http" -) - -// PutBlobInterceptor ... 
-type PutBlobInterceptor struct { - blobInfo *util.BlobInfo -} - -// NewPutBlobInterceptor ... -func NewPutBlobInterceptor(blobInfo *util.BlobInfo) *PutBlobInterceptor { - return &PutBlobInterceptor{ - blobInfo: blobInfo, - } -} - -// HandleRequest ... -func (pbi *PutBlobInterceptor) HandleRequest(req *http.Request) error { - // the redis connection will be closed in the put response. - con, err := util.GetRegRedisCon() - if err != nil { - return err - } - - defer func() { - if pbi.blobInfo.UUID != "" { - _, err := rmBlobUploadUUID(con, pbi.blobInfo.UUID) - if err != nil { - log.Warningf("error occurred when remove UUID for blob, %v", err) - } - } - }() - - dgstStr := req.FormValue("digest") - if dgstStr == "" { - return errors.New("blob digest missing") - } - dgst, err := digest.Parse(dgstStr) - if err != nil { - return errors.New("blob digest parsing failed") - } - - pbi.blobInfo.Digest = dgst.String() - pbi.blobInfo.UUID = getUUID(req.URL.Path) - size, err := util.GetBlobSize(con, pbi.blobInfo.UUID) - if err != nil { - return err - } - pbi.blobInfo.Size = size - if err := requireQuota(con, pbi.blobInfo); err != nil { - return err - } - *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, pbi.blobInfo))) - return nil -} - -// HandleResponse ... -func (pbi *PutBlobInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) { - if err := HandleBlobCommon(rw, req); err != nil { - log.Error(err) - } -} diff --git a/src/core/middlewares/sizequota/putblob_test.go b/src/core/middlewares/sizequota/putblob_test.go deleted file mode 100644 index 847623c56..000000000 --- a/src/core/middlewares/sizequota/putblob_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sizequota - -import ( - "context" - "fmt" - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/core/middlewares/util" - "github.com/stretchr/testify/assert" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -func TestNewPutBlobInterceptor(t *testing.T) { - blobinfo := &util.BlobInfo{} - blobinfo.Repository = "TestNewPutBlobInterceptor/latest" - - bi := NewPutBlobInterceptor(blobinfo) - assert.NotNil(t, bi) -} - -func TestPutBlobHandleRequest(t *testing.T) { - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - blobinfo := &util.BlobInfo{} - blobinfo.Repository = "TestPutBlobHandleRequest/latest" - - bi := NewPutBlobInterceptor(blobinfo) - assert.NotNil(t, bi.HandleRequest(req)) -} - -func TestPutBlobHandleResponse(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - blobInfo := util.BlobInfo{ - Repository: "TestPutBlobHandleResponse/test", - Digest: "sha256:TestPutBlobHandleResponseabcdf12345678sdfefeg1246", - ContentType: "ContentType", - Size: 101, - Exist: false, - } - - rw := httptest.NewRecorder() - customResW := util.CustomResponseWriter{ResponseWriter: rw} - customResW.WriteHeader(201) - - lock, err := tryLockBlob(con, &blobInfo) - 
assert.Nil(t, err) - blobInfo.DigestLock = lock - - *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo))) - - bi := NewPutBlobInterceptor(&blobInfo) - assert.NotNil(t, bi) - - bi.HandleResponse(customResW, req) -} diff --git a/src/core/middlewares/sizequota/putmanifest.go b/src/core/middlewares/sizequota/putmanifest.go deleted file mode 100644 index 76d87044a..000000000 --- a/src/core/middlewares/sizequota/putmanifest.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sizequota - -import ( - "bytes" - "context" - "fmt" - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/goharbor/harbor/src/common/utils/log" - "github.com/goharbor/harbor/src/core/middlewares/util" - "io/ioutil" - "net/http" - "strings" -) - -// PutManifestInterceptor ... -type PutManifestInterceptor struct { - blobInfo *util.BlobInfo - mfInfo *util.MfInfo -} - -// NewPutManifestInterceptor ... -func NewPutManifestInterceptor(blobInfo *util.BlobInfo, mfInfo *util.MfInfo) *PutManifestInterceptor { - return &PutManifestInterceptor{ - blobInfo: blobInfo, - mfInfo: mfInfo, - } -} - -// HandleRequest ... 
-func (pmi *PutManifestInterceptor) HandleRequest(req *http.Request) error { - mediaType := req.Header.Get("Content-Type") - if mediaType == schema1.MediaTypeManifest || - mediaType == schema1.MediaTypeSignedManifest || - mediaType == schema2.MediaTypeManifest { - - con, err := util.GetRegRedisCon() - if err != nil { - log.Infof("failed to get registry redis connection, %v", err) - return err - } - - data, err := ioutil.ReadAll(req.Body) - if err != nil { - log.Warningf("Error occurred when to copy manifest body %v", err) - return err - } - req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - manifest, desc, err := distribution.UnmarshalManifest(mediaType, data) - if err != nil { - log.Warningf("Error occurred when to Unmarshal Manifest %v", err) - return err - } - projectID, err := util.GetProjectID(strings.Split(pmi.mfInfo.Repository, "/")[0]) - if err != nil { - log.Warningf("Error occurred when to get project ID %v", err) - return err - } - - pmi.mfInfo.ProjectID = projectID - pmi.mfInfo.Refrerence = manifest.References() - pmi.mfInfo.Digest = desc.Digest.String() - pmi.blobInfo.ProjectID = projectID - pmi.blobInfo.Digest = desc.Digest.String() - pmi.blobInfo.Size = desc.Size - pmi.blobInfo.ContentType = mediaType - - if err := requireQuota(con, pmi.blobInfo); err != nil { - return err - } - - *req = *(req.WithContext(context.WithValue(req.Context(), util.MFInfokKey, pmi.mfInfo))) - *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, pmi.blobInfo))) - - return nil - } - - return fmt.Errorf("unsupported content type for manifest: %s", mediaType) -} - -// HandleResponse ... 
-func (pmi *PutManifestInterceptor) HandleResponse(rw util.CustomResponseWriter, req *http.Request) { - if err := HandleBlobCommon(rw, req); err != nil { - log.Error(err) - return - } -} diff --git a/src/core/middlewares/sizequota/putmanifest_test.go b/src/core/middlewares/sizequota/putmanifest_test.go deleted file mode 100644 index dc6b91098..000000000 --- a/src/core/middlewares/sizequota/putmanifest_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sizequota - -import ( - "context" - "fmt" - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/core/middlewares/util" - "github.com/stretchr/testify/assert" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -func TestNewPutManifestInterceptor(t *testing.T) { - blobinfo := &util.BlobInfo{} - blobinfo.Repository = "TestNewPutManifestInterceptor/latest" - - mfinfo := &util.MfInfo{ - Repository: "TestNewPutManifestInterceptor", - } - - mi := NewPutManifestInterceptor(blobinfo, mfinfo) - assert.NotNil(t, mi) -} - -func TestPutManifestHandleRequest(t *testing.T) { - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - blobinfo := &util.BlobInfo{} - blobinfo.Repository = "TestPutManifestHandleRequest/latest" - - mfinfo := &util.MfInfo{ - Repository: "TestPutManifestHandleRequest", - } - - mi := NewPutManifestInterceptor(blobinfo, mfinfo) - assert.NotNil(t, mi.HandleRequest(req)) -} - -func TestPutManifestHandleResponse(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - req, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) - blobInfo := util.BlobInfo{ - Repository: "TestPutManifestandleResponse/test", - Digest: "sha256:TestPutManifestandleResponseabcdf12345678sdfefeg1246", - ContentType: "ContentType", - Size: 101, - Exist: false, - } - - mfinfo := util.MfInfo{ - Repository: "TestPutManifestandleResponse", - } - - rw := httptest.NewRecorder() - customResW := util.CustomResponseWriter{ResponseWriter: rw} - customResW.WriteHeader(201) - - lock, err := tryLockBlob(con, &blobInfo) - assert.Nil(t, err) - blobInfo.DigestLock = lock - - *req = *(req.WithContext(context.WithValue(req.Context(), util.BBInfokKey, &blobInfo))) 
- - bi := NewPutManifestInterceptor(&blobInfo, &mfinfo) - assert.NotNil(t, bi) - - bi.HandleResponse(customResW, req) -} diff --git a/src/core/middlewares/sizequota/util.go b/src/core/middlewares/sizequota/util.go new file mode 100644 index 000000000..edcf92631 --- /dev/null +++ b/src/core/middlewares/sizequota/util.go @@ -0,0 +1,330 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sizequota + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/garyburd/redigo/redis" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/opencontainers/go-digest" +) + +var ( + blobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/([a-zA-Z0-9-_.=]+)/?$`) + initiateBlobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/?$`) +) + +// parseUploadedBlobSize parse the blob stream upload response and return the size blob uploaded +func parseUploadedBlobSize(w http.ResponseWriter) (int64, error) { + // Range: Range indicating the current progress of the upload. 
+ // https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload + r := w.Header().Get("Range") + + end := strings.Split(r, "-")[1] + size, err := strconv.ParseInt(end, 10, 64) + if err != nil { + return 0, err + } + + // docker registry did '-1' in the response + if size > 0 { + size = size + 1 + } + + return size, nil +} + +// setUploadedBlobSize update the size of stream upload blob +func setUploadedBlobSize(uuid string, size int64) (bool, error) { + conn, err := util.GetRegRedisCon() + if err != nil { + return false, err + } + defer conn.Close() + + key := fmt.Sprintf("upload:%s:size", uuid) + reply, err := redis.String(conn.Do("SET", key, size)) + if err != nil { + return false, err + } + return reply == "OK", nil + +} + +// getUploadedBlobSize returns the size of stream upload blob +func getUploadedBlobSize(uuid string) (int64, error) { + conn, err := util.GetRegRedisCon() + if err != nil { + return 0, err + } + defer conn.Close() + + key := fmt.Sprintf("upload:%s:size", uuid) + size, err := redis.Int64(conn.Do("GET", key)) + if err != nil { + return 0, err + } + + return size, nil +} + +// parseBlobSize returns blob size from blob upload complete request +func parseBlobSize(req *http.Request, uuid string) (int64, error) { + size, err := strconv.ParseInt(req.Header.Get("Content-Length"), 10, 64) + if err == nil && size != 0 { + return size, nil + } + + return getUploadedBlobSize(uuid) +} + +// match returns true if request method equal method and path match re +func match(req *http.Request, method string, re *regexp.Regexp) bool { + return req.Method == method && re.MatchString(req.URL.Path) +} + +// parseBlobInfoFromComplete returns blob info from blob upload complete request +func parseBlobInfoFromComplete(req *http.Request) (*util.BlobInfo, error) { + if !match(req, http.MethodPut, blobUploadURLRe) { + return nil, fmt.Errorf("not match url %s for blob upload complete", req.URL.Path) + } + + s := 
blobUploadURLRe.FindStringSubmatch(req.URL.Path) + repository, uuid := s[1][:len(s[1])-1], s[2] + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + dgt, err := digest.Parse(req.FormValue("digest")) + if err != nil { + return nil, fmt.Errorf("blob digest invalid for upload %s", uuid) + } + + size, err := parseBlobSize(req, uuid) + if err != nil { + return nil, fmt.Errorf("failed to get content length of blob upload %s, error: %v", uuid, err) + } + + return &util.BlobInfo{ + ProjectID: project.ProjectID, + Repository: repository, + Digest: dgt.String(), + Size: size, + }, nil +} + +// parseBlobInfoFromManifest returns blob info from put the manifest request +func parseBlobInfoFromManifest(req *http.Request) (*util.BlobInfo, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + manifest, err := util.ParseManifestInfo(req) + if err != nil { + return nil, err + } + + info = manifest + + // replace the request with manifest info + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) + } + + return &util.BlobInfo{ + ProjectID: info.ProjectID, + Repository: info.Repository, + Digest: info.Descriptor.Digest.String(), + Size: info.Descriptor.Size, + ContentType: info.Descriptor.MediaType, + }, nil +} + +// parseBlobInfoFromMount returns blob info from blob mount request +func parseBlobInfoFromMount(req *http.Request) (*util.BlobInfo, error) { + if !match(req, http.MethodPost, initiateBlobUploadURLRe) { + return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path) + } + + if req.FormValue("mount") == "" || req.FormValue("from") == "" { + return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path) + } + + dgt, err := digest.Parse(req.FormValue("mount")) + if 
err != nil { + return nil, errors.New("mount must be digest") + } + + s := initiateBlobUploadURLRe.FindStringSubmatch(req.URL.Path) + repository := strings.TrimSuffix(s[1], "/") + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + blob, err := dao.GetBlob(dgt.String()) + if err != nil { + return nil, fmt.Errorf("failed to get blob %s, error: %v", dgt.String(), err) + } + if blob == nil { + return nil, fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", dgt.String()) + } + + return &util.BlobInfo{ + ProjectID: project.ProjectID, + Repository: repository, + Digest: dgt.String(), + Size: blob.Size, + }, nil +} + +// getBlobInfoParser return parse blob info function for request +// returns parseBlobInfoFromComplete when request match PUT /v2//blobs/uploads/?digest= +// returns parseBlobInfoFromMount when request match POST /v2//blobs/uploads/?mount=&from= +func getBlobInfoParser(req *http.Request) func(*http.Request) (*util.BlobInfo, error) { + if match(req, http.MethodPut, blobUploadURLRe) { + if req.FormValue("digest") != "" { + return parseBlobInfoFromComplete + } + } + + if match(req, http.MethodPost, initiateBlobUploadURLRe) { + if req.FormValue("mount") != "" && req.FormValue("from") != "" { + return parseBlobInfoFromMount + } + } + + return nil +} + +// computeResourcesForBlob returns storage required for blob, no storage required if blob exists in project +func computeResourcesForBlob(req *http.Request) (types.ResourceList, error) { + info, ok := util.BlobInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("blob info missing") + } + + exist, err := info.BlobExists() + if err != nil { + return nil, err + } + + if exist { + return nil, nil + } + + return 
types.ResourceList{types.ResourceStorage: info.Size}, nil +} + +// computeResourcesForManifestCreation returns storage resource required for manifest +// no storage required if manifest exists in project +// the sum size of manifest itself and blobs not in project will return if manifest not exists in project +func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("manifest info missing") + } + + exist, err := info.ManifestExists() + if err != nil { + return nil, err + } + + // manifest exist in project, so no storage quota required + if exist { + return nil, nil + } + + blobs, err := info.GetBlobsNotInProject() + if err != nil { + return nil, err + } + + size := info.Descriptor.Size + + for _, blob := range blobs { + size += blob.Size + } + + return types.ResourceList{types.ResourceStorage: size}, nil +} + +// computeResourcesForManifestDeletion returns storage resource will be released when manifest deleted +// then result will be the sum of manifest itself and blobs which will not be used by other manifests of project +func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("manifest info missing") + } + + blobs, err := dao.GetExclusiveBlobs(info.ProjectID, info.Repository, info.Digest) + if err != nil { + return nil, err + } + + info.ExclusiveBlobs = blobs + + blob, err := dao.GetBlob(info.Digest) + if err != nil { + return nil, err + } + + // manifest size will always be released + size := blob.Size + + for _, blob := range blobs { + size = size + blob.Size + } + + return types.ResourceList{types.ResourceStorage: size}, nil +} + +// syncBlobInfoToProject create the blob and add it to project +func syncBlobInfoToProject(info *util.BlobInfo) error { + _, blob, err := dao.GetOrCreateBlob(&models.Blob{ + Digest: 
info.Digest, + ContentType: info.ContentType, + Size: info.Size, + CreationTime: time.Now(), + }) + if err != nil { + return err + } + + if _, err := dao.AddBlobToProject(blob.ID, info.ProjectID); err != nil { + return err + } + + return nil +} diff --git a/src/core/middlewares/util/util.go b/src/core/middlewares/util/util.go index 82491161f..7b8d2839e 100644 --- a/src/core/middlewares/util/util.go +++ b/src/core/middlewares/util/util.go @@ -15,51 +15,49 @@ package util import ( + "bytes" "context" "encoding/json" - "errors" "fmt" + "io/ioutil" "net/http" "net/http/httptest" "os" "regexp" - "strconv" "strings" + "sync" "time" "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" "github.com/garyburd/redigo/redis" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/quota" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils/clair" "github.com/goharbor/harbor/src/common/utils/log" - common_redis "github.com/goharbor/harbor/src/common/utils/redis" "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/promgr" "github.com/goharbor/harbor/src/pkg/scan/whitelist" + "github.com/opencontainers/go-digest" ) type contextKey string -// ErrRequireQuota ... -var ErrRequireQuota = errors.New("cannot get quota on project for request") - const ( - manifestURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})` - blobURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/` - - chartVersionInfoKey = contextKey("ChartVersionInfo") - // ImageInfoCtxKey the context key for image information ImageInfoCtxKey = contextKey("ImageInfo") // TokenUsername ... // TODO: temp solution, remove after vmware/harbor#2242 is resolved. 
TokenUsername = "harbor-core" - // MFInfokKey the context key for image tag redis lock - MFInfokKey = contextKey("ManifestInfo") - // BBInfokKey the context key for image tag redis lock - BBInfokKey = contextKey("BlobInfo") + + // blobInfoKey the context key for blob info + blobInfoKey = contextKey("BlobInfo") + // chartVersionInfoKey the context key for chart version info + chartVersionInfoKey = contextKey("ChartVersionInfo") + // manifestInfoKey the context key for manifest info + manifestInfoKey = contextKey("ManifestInfo") // DialConnectionTimeout ... DialConnectionTimeout = 30 * time.Second @@ -69,6 +67,10 @@ const ( DialWriteTimeout = 10 * time.Second ) +var ( + manifestURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`) +) + // ChartVersionInfo ... type ChartVersionInfo struct { ProjectID int64 @@ -77,6 +79,13 @@ type ChartVersionInfo struct { Version string } +// MutexKey returns mutex key of the chart version +func (info *ChartVersionInfo) MutexKey(suffix ...string) string { + a := []string{"quota", info.Namespace, "chart", info.ChartName, "version", info.Version} + + return strings.Join(append(a, suffix...), ":") +} + // ImageInfo ... type ImageInfo struct { Repository string @@ -87,46 +96,147 @@ type ImageInfo struct { // BlobInfo ... type BlobInfo struct { - UUID string ProjectID int64 ContentType string Size int64 Repository string - Tag string + Digest string - // Exist is to index the existing of the manifest in DB. If false, it's an new image for uploading. - Exist bool - - Digest string - DigestLock *common_redis.Mutex - // Quota is the resource applied for the manifest upload request. - Quota *quota.ResourceList + blobExist bool + blobExistErr error + blobExistOnce sync.Once } -// MfInfo ... 
-type MfInfo struct { +// BlobExists returns true when blob exists in the project +func (info *BlobInfo) BlobExists() (bool, error) { + info.blobExistOnce.Do(func() { + info.blobExist, info.blobExistErr = dao.HasBlobInProject(info.ProjectID, info.Digest) + }) + + return info.blobExist, info.blobExistErr +} + +// MutexKey returns mutex key of the blob +func (info *BlobInfo) MutexKey(suffix ...string) string { + projectName, _ := utils.ParseRepository(info.Repository) + a := []string{"quota", projectName, "blob", info.Digest} + + return strings.Join(append(a, suffix...), ":") +} + +// ManifestInfo ... +type ManifestInfo struct { // basic information of a manifest ProjectID int64 Repository string Tag string Digest string - // Exist is to index the existing of the manifest in DB. If false, it's an new image for uploading. - Exist bool + References []distribution.Descriptor + Descriptor distribution.Descriptor - // ArtifactID is the ID of the artifact which query by repository and tag - ArtifactID int64 + // manifestExist is to index the existing of the manifest in DB by (repository, digest) + manifestExist bool + manifestExistErr error + manifestExistOnce sync.Once - // DigestChanged true means the manifest exists but digest is changed. - // Probably it's a new image with existing repo/tag name or overwrite. - DigestChanged bool + // artifact the artifact indexed by (repository, tag) in DB + artifact *models.Artifact + artifactErr error + artifactOnce sync.Once - // used to block multiple push on same image. - TagLock *common_redis.Mutex - Refrerence []distribution.Descriptor + // ExclusiveBlobs include the blobs that belong to the manifest only + // and exclude the blobs that shared by other manifests in the same repo(project/repository). + ExclusiveBlobs []*models.Blob +} - // Quota is the resource applied for the manifest upload request. 
- Quota *quota.ResourceList +// MutexKey returns mutex key of the manifest +func (info *ManifestInfo) MutexKey(suffix ...string) string { + projectName, _ := utils.ParseRepository(info.Repository) + var a []string + + if info.Tag != "" { + // tag not empty happened in PUT /v2//manifests/ + // lock by to tag to compute the count resource required by quota + a = []string{"quota", projectName, "manifest", info.Tag} + } else { + a = []string{"quota", projectName, "manifest", info.Digest} + } + + return strings.Join(append(a, suffix...), ":") +} + +// BlobMutexKey returns mutex key of the blob in manifest +func (info *ManifestInfo) BlobMutexKey(blob *models.Blob, suffix ...string) string { + projectName, _ := utils.ParseRepository(info.Repository) + a := []string{"quota", projectName, "blob", blob.Digest} + + return strings.Join(append(a, suffix...), ":") +} + +// GetBlobsNotInProject returns blobs of the manifest which not in the project +func (info *ManifestInfo) GetBlobsNotInProject() ([]*models.Blob, error) { + var digests []string + for _, reference := range info.References { + digests = append(digests, reference.Digest.String()) + } + + blobs, err := dao.GetBlobsNotInProject(info.ProjectID, digests...) 
+ if err != nil { + return nil, err + } + + return blobs, nil +} + +func (info *ManifestInfo) fetchArtifact() (*models.Artifact, error) { + info.artifactOnce.Do(func() { + info.artifact, info.artifactErr = dao.GetArtifact(info.Repository, info.Tag) + }) + + return info.artifact, info.artifactErr +} + +// IsNewTag returns true if the tag of the manifest not exists in project +func (info *ManifestInfo) IsNewTag() bool { + artifact, _ := info.fetchArtifact() + + return artifact == nil +} + +// Artifact returns artifact of the manifest +func (info *ManifestInfo) Artifact() *models.Artifact { + result := &models.Artifact{ + PID: info.ProjectID, + Repo: info.Repository, + Tag: info.Tag, + Digest: info.Digest, + Kind: "Docker-Image", + } + + if artifact, _ := info.fetchArtifact(); artifact != nil { + result.ID = artifact.ID + result.CreationTime = artifact.CreationTime + result.PushTime = time.Now() + } + + return result +} + +// ManifestExists returns true if manifest exist in repository +func (info *ManifestInfo) ManifestExists() (bool, error) { + info.manifestExistOnce.Do(func() { + total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{ + PID: info.ProjectID, + Repo: info.Repository, + Digest: info.Digest, + }) + + info.manifestExist = total > 0 + info.manifestExistErr = err + }) + + return info.manifestExist, info.manifestExistErr } // JSONError wraps a concrete Code and Message, it's readable for docker deamon. @@ -156,12 +266,7 @@ func MarshalError(code, msg string) string { // MatchManifestURL ... 
func MatchManifestURL(req *http.Request) (bool, string, string) { - re, err := regexp.Compile(manifestURLPattern) - if err != nil { - log.Errorf("error to match manifest url, %v", err) - return false, "", "" - } - s := re.FindStringSubmatch(req.URL.Path) + s := manifestURLRe.FindStringSubmatch(req.URL.Path) if len(s) == 3 { s[1] = strings.TrimSuffix(s[1], "/") return true, s[1], s[2] @@ -169,42 +274,6 @@ func MatchManifestURL(req *http.Request) (bool, string, string) { return false, "", "" } -// MatchPutBlobURL ... -func MatchPutBlobURL(req *http.Request) (bool, string) { - if req.Method != http.MethodPut { - return false, "" - } - re, err := regexp.Compile(blobURLPattern) - if err != nil { - log.Errorf("error to match put blob url, %v", err) - return false, "" - } - s := re.FindStringSubmatch(req.URL.Path) - if len(s) == 2 { - s[1] = strings.TrimSuffix(s[1], "/") - return true, s[1] - } - return false, "" -} - -// MatchPatchBlobURL ... -func MatchPatchBlobURL(req *http.Request) (bool, string) { - if req.Method != http.MethodPatch { - return false, "" - } - re, err := regexp.Compile(blobURLPattern) - if err != nil { - log.Errorf("error to match put blob url, %v", err) - return false, "" - } - s := re.FindStringSubmatch(req.URL.Path) - if len(s) == 2 { - s[1] = strings.TrimSuffix(s[1], "/") - return true, s[1] - } - return false, "" -} - // MatchPullManifest checks if the request looks like a request to pull manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values func MatchPullManifest(req *http.Request) (bool, string, string) { if req.Method != http.MethodGet { @@ -221,31 +290,21 @@ func MatchPushManifest(req *http.Request) (bool, string, string) { return MatchManifestURL(req) } -// MatchMountBlobURL POST /v2//blobs/uploads/?mount=&from= -// If match, will return repo, mount and from as the 2nd, 3th and 4th. 
-func MatchMountBlobURL(req *http.Request) (bool, string, string, string) { - if req.Method != http.MethodPost { - return false, "", "", "" +// MatchDeleteManifest checks if the request +func MatchDeleteManifest(req *http.Request) (match bool, repository string, reference string) { + if req.Method != http.MethodDelete { + return } - re, err := regexp.Compile(blobURLPattern) - if err != nil { - log.Errorf("error to match post blob url, %v", err) - return false, "", "", "" + + match, repository, reference = MatchManifestURL(req) + if _, err := digest.Parse(reference); err != nil { + // Delete manifest only accept digest as reference + match = false + + return } - s := re.FindStringSubmatch(req.URL.Path) - if len(s) == 2 { - s[1] = strings.TrimSuffix(s[1], "/") - mount := req.FormValue("mount") - if mount == "" { - return false, "", "", "" - } - from := req.FormValue("from") - if from == "" { - return false, "", "", "" - } - return true, s[1], mount, from - } - return false, "", "", "" + + return } // CopyResp ... @@ -318,72 +377,6 @@ func GetPolicyChecker() PolicyChecker { return NewPMSPolicyChecker(config.GlobalProjectMgr) } -// TryRequireQuota ... 
-func TryRequireQuota(projectID int64, quotaRes *quota.ResourceList) error { - quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10)) - if err != nil { - log.Errorf("Error occurred when to new quota manager %v", err) - return err - } - if err := quotaMgr.AddResources(*quotaRes); err != nil { - log.Errorf("cannot get quota for the project resource: %d, err: %v", projectID, err) - return ErrRequireQuota - } - return nil -} - -// TryFreeQuota used to release resource for failure case -func TryFreeQuota(projectID int64, qres *quota.ResourceList) bool { - quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10)) - if err != nil { - log.Errorf("Error occurred when to new quota manager %v", err) - return false - } - - if err := quotaMgr.SubtractResources(*qres); err != nil { - log.Errorf("cannot release quota for the project resource: %d, err: %v", projectID, err) - return false - } - return true -} - -// GetBlobSize blob size with UUID in redis -func GetBlobSize(conn redis.Conn, uuid string) (int64, error) { - exists, err := redis.Int(conn.Do("EXISTS", uuid)) - if err != nil { - return 0, err - } - if exists == 1 { - size, err := redis.Int64(conn.Do("GET", uuid)) - if err != nil { - return 0, err - } - return size, nil - } - return 0, nil -} - -// SetBunkSize sets the temp size for blob bunk with its uuid. -func SetBunkSize(conn redis.Conn, uuid string, size int64) (bool, error) { - setRes, err := redis.String(conn.Do("SET", uuid, size)) - if err != nil { - return false, err - } - return setRes == "OK", nil -} - -// GetProjectID ... -func GetProjectID(name string) (int64, error) { - project, err := dao.GetProjectByName(name) - if err != nil { - return 0, err - } - if project != nil { - return project.ProjectID, nil - } - return 0, fmt.Errorf("project %s is not found", name) -} - // GetRegRedisCon ... 
func GetRegRedisCon() (redis.Conn, error) { // FOR UT @@ -406,7 +399,7 @@ func GetRegRedisCon() (redis.Conn, error) { // BlobInfoFromContext returns blob info from context func BlobInfoFromContext(ctx context.Context) (*BlobInfo, bool) { - info, ok := ctx.Value(BBInfokKey).(*BlobInfo) + info, ok := ctx.Value(blobInfoKey).(*BlobInfo) return info, ok } @@ -423,14 +416,14 @@ func ImageInfoFromContext(ctx context.Context) (*ImageInfo, bool) { } // ManifestInfoFromContext returns manifest info from context -func ManifestInfoFromContext(ctx context.Context) (*MfInfo, bool) { - info, ok := ctx.Value(MFInfokKey).(*MfInfo) +func ManifestInfoFromContext(ctx context.Context) (*ManifestInfo, bool) { + info, ok := ctx.Value(manifestInfoKey).(*ManifestInfo) return info, ok } // NewBlobInfoContext returns context with blob info func NewBlobInfoContext(ctx context.Context, info *BlobInfo) context.Context { - return context.WithValue(ctx, BBInfokKey, info) + return context.WithValue(ctx, blobInfoKey, info) } // NewChartVersionInfoContext returns context with blob info @@ -444,6 +437,92 @@ func NewImageInfoContext(ctx context.Context, info *ImageInfo) context.Context { } // NewManifestInfoContext returns context with manifest info -func NewManifestInfoContext(ctx context.Context, info *MfInfo) context.Context { - return context.WithValue(ctx, MFInfokKey, info) +func NewManifestInfoContext(ctx context.Context, info *ManifestInfo) context.Context { + return context.WithValue(ctx, manifestInfoKey, info) +} + +// ParseManifestInfo prase manifest from request +func ParseManifestInfo(req *http.Request) (*ManifestInfo, error) { + match, repository, reference := MatchManifestURL(req) + if !match { + return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path) + } + + var tag string + if _, err := digest.Parse(reference); err != nil { + tag = reference + } + + mediaType := req.Header.Get("Content-Type") + if mediaType != schema1.MediaTypeManifest && + mediaType != 
schema1.MediaTypeSignedManifest && + mediaType != schema2.MediaTypeManifest { + return nil, fmt.Errorf("unsupported content type for manifest: %s", mediaType) + } + + if req.Body == nil { + return nil, fmt.Errorf("body missing") + } + + body, err := ioutil.ReadAll(req.Body) + if err != nil { + log.Warningf("Error occurred when to copy manifest body %v", err) + return nil, err + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + + manifest, desc, err := distribution.UnmarshalManifest(mediaType, body) + if err != nil { + log.Warningf("Error occurred when to Unmarshal Manifest %v", err) + return nil, err + } + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + return &ManifestInfo{ + ProjectID: project.ProjectID, + Repository: repository, + Tag: tag, + Digest: desc.Digest.String(), + References: manifest.References(), + Descriptor: desc, + }, nil +} + +// ParseManifestInfoFromPath prase manifest from request path +func ParseManifestInfoFromPath(req *http.Request) (*ManifestInfo, error) { + match, repository, reference := MatchManifestURL(req) + if !match { + return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path) + } + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + info := &ManifestInfo{ + ProjectID: project.ProjectID, + Repository: repository, + } + + dgt, err := digest.Parse(reference) + if err != nil { + info.Tag = reference + } else { + info.Digest = dgt.String() + } + + return info, nil } diff --git a/src/core/middlewares/util/util_test.go 
b/src/core/middlewares/util/util_test.go index 736c81a05..e02229ad9 100644 --- a/src/core/middlewares/util/util_test.go +++ b/src/core/middlewares/util/util_test.go @@ -15,33 +15,31 @@ package util import ( - "github.com/goharbor/harbor/src/common" - "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/models" - notarytest "github.com/goharbor/harbor/src/common/utils/notary/test" - testutils "github.com/goharbor/harbor/src/common/utils/test" - "github.com/goharbor/harbor/src/core/config" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "fmt" - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/common/quota" + "bytes" + "encoding/json" "net/http" "net/http/httptest" "os" + "reflect" "testing" - "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + notarytest "github.com/goharbor/harbor/src/common/utils/notary/test" + testutils "github.com/goharbor/harbor/src/common/utils/test" + "github.com/goharbor/harbor/src/core/config" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var endpoint = "10.117.4.142" var notaryServer *httptest.Server -const testingRedisHost = "REDIS_HOST" - -var admiralEndpoint = "http://127.0.0.1:8282" -var token = "" - func TestMain(m *testing.M) { testutils.InitDatabaseFromEnv() notaryServer = notarytest.NewNotaryServer(endpoint) @@ -99,56 +97,6 @@ func TestMatchPullManifest(t *testing.T) { assert.Equal("sha256:ca4626b691f57d16ce1576231e4a2e2135554d32e13a85dcff380d51fdd13f6a", tag7) } -func TestMatchPutBlob(t *testing.T) { - assert := assert.New(t) - req1, _ := http.NewRequest("PUT", 
"http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil) - res1, repo1 := MatchPutBlobURL(req1) - assert.True(res1, "%s %v is not a request to put blob", req1.Method, req1.URL) - assert.Equal("library/ubuntu", repo1) - - req2, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/blobs/uploads/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil) - res2, _ := MatchPutBlobURL(req2) - assert.False(res2, "%s %v is a request to put blob", req2.Method, req2.URL) - - req3, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/manifest/67bb4d9b-4dab-4bbe-b726-2e39322b8303?_state=7W3kWkgdr3fTW", nil) - res3, _ := MatchPutBlobURL(req3) - assert.False(res3, "%s %v is not a request to put blob", req3.Method, req3.URL) -} - -func TestMatchMountBlobURL(t *testing.T) { - assert := assert.New(t) - req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil) - res1, repo1, mount, from := MatchMountBlobURL(req1) - assert.True(res1, "%s %v is not a request to mount blob", req1.Method, req1.URL) - assert.Equal("library/ubuntu", repo1) - assert.Equal("digtest123", mount) - assert.Equal("testrepo", from) - - req2, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil) - res2, _, _, _ := MatchMountBlobURL(req2) - assert.False(res2, "%s %v is a request to mount blob", req2.Method, req2.URL) - - req3, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil) - res3, _, _, _ := MatchMountBlobURL(req3) - assert.False(res3, "%s %v is not a request to put blob", req3.Method, req3.URL) -} - -func TestPatchBlobURL(t *testing.T) { - assert := assert.New(t) - req1, _ := http.NewRequest("PATCH", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/1234-1234-abcd", nil) - res1, repo1 := 
MatchPatchBlobURL(req1) - assert.True(res1, "%s %v is not a request to patch blob", req1.Method, req1.URL) - assert.Equal("library/ubuntu", repo1) - - req2, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/1234-1234-abcd", nil) - res2, _ := MatchPatchBlobURL(req2) - assert.False(res2, "%s %v is a request to patch blob", req2.Method, req2.URL) - - req3, _ := http.NewRequest("PUT", "http://127.0.0.1:5000/v2/library/ubuntu/blobs/uploads/?mount=digtest123&from=testrepo", nil) - res3, _ := MatchPatchBlobURL(req3) - assert.False(res3, "%s %v is not a request to patch blob", req3.Method, req3.URL) -} - func TestMatchPushManifest(t *testing.T) { assert := assert.New(t) req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/library/ubuntu/manifests/14.04", nil) @@ -260,83 +208,194 @@ func TestMarshalError(t *testing.T) { assert.Equal("{\"errors\":[{\"code\":\"DENIED\",\"message\":\"The action is denied\",\"detail\":\"The action is denied\"}]}", js2) } -func TestTryRequireQuota(t *testing.T) { - quotaRes := "a.ResourceList{ - quota.ResourceStorage: 100, - } - err := TryRequireQuota(1, quotaRes) - assert.Nil(t, err) -} - -func TestTryFreeQuota(t *testing.T) { - quotaRes := "a.ResourceList{ - quota.ResourceStorage: 1, - } - success := TryFreeQuota(1, quotaRes) - assert.True(t, success) -} - -func TestGetBlobSize(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - size, err := GetBlobSize(con, "test-TestGetBlobSize") - assert.Nil(t, err) - assert.Equal(t, size, int64(0)) -} - -func TestSetBunkSize(t *testing.T) { - con, err := redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", getRedisHost(), 6379), - redis.DialConnectTimeout(30*time.Second), - redis.DialReadTimeout(time.Minute+10*time.Second), - 
redis.DialWriteTimeout(10*time.Second), - ) - assert.Nil(t, err) - defer con.Close() - - size, err := GetBlobSize(con, "TestSetBunkSize") - assert.Nil(t, err) - assert.Equal(t, size, int64(0)) - - _, err = SetBunkSize(con, "TestSetBunkSize", 123) - assert.Nil(t, err) - - size1, err := GetBlobSize(con, "TestSetBunkSize") - assert.Nil(t, err) - assert.Equal(t, size1, int64(123)) -} - -func TestGetProjectID(t *testing.T) { - name := "project_for_TestGetProjectID" - project := models.Project{ - OwnerID: 1, - Name: name, +func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest { + manifest := schema2.Manifest{ + Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest}, + Config: distribution.Descriptor{ + MediaType: schema2.MediaTypeImageConfig, + Size: configSize, + Digest: digest.FromString(utils.GenerateRandomString()), + }, } - id, err := dao.AddProject(project) - if err != nil { - t.Fatalf("failed to add project: %v", err) + for _, size := range layerSizes { + manifest.Layers = append(manifest.Layers, distribution.Descriptor{ + MediaType: schema2.MediaTypeLayer, + Size: size, + Digest: digest.FromString(utils.GenerateRandomString()), + }) } - idget, err := GetProjectID(name) - assert.Nil(t, err) - assert.Equal(t, id, idget) + return manifest } -func getRedisHost() string { - redisHost := os.Getenv(testingRedisHost) - if redisHost == "" { - redisHost = "127.0.0.1" // for local test +func getDescriptor(manifest schema2.Manifest) distribution.Descriptor { + buf, _ := json.Marshal(manifest) + _, desc, _ := distribution.UnmarshalManifest(manifest.Versioned.MediaType, buf) + return desc +} + +func TestParseManifestInfo(t *testing.T) { + manifest := makeManifest(1, []int64{2, 3, 4}) + + tests := []struct { + name string + req func() *http.Request + want *ManifestInfo + wantErr bool + }{ + { + "ok", + func() *http.Request { + buf, _ := json.Marshal(manifest) + req, _ := http.NewRequest(http.MethodPut, 
"/v2/library/photon/manifests/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + return req + }, + &ManifestInfo{ + ProjectID: 1, + Repository: "library/photon", + Tag: "latest", + Digest: getDescriptor(manifest).Digest.String(), + References: manifest.References(), + Descriptor: getDescriptor(manifest), + }, + false, + }, + { + "bad content type", + func() *http.Request { + buf, _ := json.Marshal(manifest) + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", "application/json") + + return req + }, + nil, + true, + }, + { + "bad manifest", + func() *http.Request { + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader([]byte(""))) + req.Header.Add("Content-Type", schema2.MediaTypeManifest) + + return req + }, + nil, + true, + }, + { + "body missing", + func() *http.Request { + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", nil) + req.Header.Add("Content-Type", schema2.MediaTypeManifest) + + return req + }, + nil, + true, + }, + { + "project not found", + func() *http.Request { + + buf, _ := json.Marshal(manifest) + + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + return req + }, + nil, + true, + }, + { + "url not match", + func() *http.Request { + buf, _ := json.Marshal(manifest) + req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifest/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + return req + }, + nil, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseManifestInfo(tt.req()) + if (err != nil) != tt.wantErr { + t.Errorf("ParseManifestInfo() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + 
t.Errorf("ParseManifestInfo() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestParseManifestInfoFromPath(t *testing.T) { + mustRequest := func(method, url string) *http.Request { + req, _ := http.NewRequest(method, url, nil) + return req } - return redisHost + type args struct { + req *http.Request + } + tests := []struct { + name string + args args + want *ManifestInfo + wantErr bool + }{ + { + "ok for digest", + args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059")}, + &ManifestInfo{ + ProjectID: 1, + Repository: "library/photon", + Digest: "sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059", + }, + false, + }, + { + "ok for tag", + args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/latest")}, + &ManifestInfo{ + ProjectID: 1, + Repository: "library/photon", + Tag: "latest", + }, + false, + }, + { + "project not found", + args{mustRequest(http.MethodDelete, "/v2/notfound/photon/manifests/latest")}, + nil, + true, + }, + { + "url not match", + args{mustRequest(http.MethodDelete, "/v2/library/photon/manifest/latest")}, + nil, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseManifestInfoFromPath(tt.args.req) + if (err != nil) != tt.wantErr { + t.Errorf("ParseManifestInfoFromPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseManifestInfoFromPath() = %v, want %v", got, tt.want) + } + }) + } } diff --git a/src/core/router.go b/src/core/router.go index 04fd1a173..39729174a 100755 --- a/src/core/router.go +++ b/src/core/router.go @@ -134,6 +134,7 @@ func initRouters() { beego.Router("/api/internal/syncregistry", &api.InternalAPI{}, "post:SyncRegistry") beego.Router("/api/internal/renameadmin", &api.InternalAPI{}, "post:RenameAdmin") + beego.Router("/api/internal/switchquota", &api.InternalAPI{}, "put:SwitchQuota") // 
external service that hosted on harbor process: beego.Router("/service/notifications", &registry.NotificationHandler{}) diff --git a/src/core/service/notifications/jobs/handler.go b/src/core/service/notifications/jobs/handler.go index a5b923aba..cf6706b7c 100755 --- a/src/core/service/notifications/jobs/handler.go +++ b/src/core/service/notifications/jobs/handler.go @@ -33,12 +33,11 @@ import ( var statusMap = map[string]string{ job.JobServiceStatusPending: models.JobPending, + job.JobServiceStatusScheduled: models.JobScheduled, job.JobServiceStatusRunning: models.JobRunning, job.JobServiceStatusStopped: models.JobStopped, - job.JobServiceStatusCancelled: models.JobCanceled, job.JobServiceStatusError: models.JobError, job.JobServiceStatusSuccess: models.JobFinished, - job.JobServiceStatusScheduled: models.JobScheduled, } // Handler handles reqeust on /service/notifications/jobs/*, which listens to the webhook of jobservice. diff --git a/src/core/service/notifications/registry/handler.go b/src/core/service/notifications/registry/handler.go index 351938a45..eb581ef1e 100755 --- a/src/core/service/notifications/registry/handler.go +++ b/src/core/service/notifications/registry/handler.go @@ -112,7 +112,7 @@ func (n *NotificationHandler) Post() { }() } - if !coreutils.WaitForManifestReady(repository, tag, 5) { + if !coreutils.WaitForManifestReady(repository, tag, 6) { log.Errorf("Manifest for image %s:%s is not ready, skip the follow up actions.", repository, tag) return } diff --git a/src/core/utils/utils.go b/src/core/utils/utils.go index 7997227a8..e55f8a010 100644 --- a/src/core/utils/utils.go +++ b/src/core/utils/utils.go @@ -62,14 +62,19 @@ func newRepositoryClient(endpoint, username, repository string) (*registry.Repos // WaitForManifestReady implements exponential sleeep to wait until manifest is ready in registry. 
// This is a workaround for https://github.com/docker/distribution/issues/2625 func WaitForManifestReady(repository string, tag string, maxRetry int) bool { - // The initial wait interval, hard-coded to 50ms - interval := 50 * time.Millisecond + // The initial wait interval, hard-coded to 80ms, interval will be 80ms,200ms,500ms,1.25s,3.124999936s + interval := 80 * time.Millisecond repoClient, err := NewRepositoryClientForUI("harbor-core", repository) if err != nil { log.Errorf("Failed to create repo client.") return false } for i := 0; i < maxRetry; i++ { + if i != 0 { + log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval) + time.Sleep(interval) + interval = time.Duration(int64(float32(interval) * 2.5)) + } _, exist, err := repoClient.ManifestExist(tag) if err != nil { log.Errorf("Unexpected error when checking manifest existence, image: %s:%s, error: %v", repository, tag, err) @@ -78,9 +83,6 @@ func WaitForManifestReady(repository string, tag string, maxRetry int) bool { if exist { return true } - log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval) - time.Sleep(interval) - interval = interval * 2 } return false } diff --git a/src/core/views/404.tpl b/src/core/views/404.tpl index 88213a5d5..e6d0d6f2e 100644 --- a/src/core/views/404.tpl +++ b/src/core/views/404.tpl @@ -67,7 +67,7 @@ a.underline, .underline{ Page Not Found diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator.go b/src/pkg/retention/policy/rule/dayspl/evaluator.go index 9b2fb34e9..c0fd76256 100644 --- a/src/pkg/retention/policy/rule/dayspl/evaluator.go +++ b/src/pkg/retention/policy/rule/dayspl/evaluator.go @@ -58,12 +58,13 @@ func (e *evaluator) Action() string { func New(params rule.Parameters) rule.Evaluator { if params != nil { if p, ok := params[ParameterN]; ok { - if v, ok := p.(int); ok && v >= 0 { - return &evaluator{n: v} + if v, ok := p.(float64); ok && v >= 0 { + return &evaluator{n: int(v)} } } 
} - log.Debugf("default parameter %d used for rule %s", DefaultN, TemplateID) + log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID) + return &evaluator{n: DefaultN} } diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator_test.go b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go index 0c8ba1ec1..a8587ccd8 100644 --- a/src/pkg/retention/policy/rule/dayspl/evaluator_test.go +++ b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go @@ -15,7 +15,7 @@ package dayspl import ( - "strconv" + "fmt" "testing" "time" @@ -36,8 +36,8 @@ func (e *EvaluatorTestSuite) TestNew() { args rule.Parameters expectedN int }{ - {Name: "Valid", args: map[string]rule.Parameter{ParameterN: 5}, expectedN: 5}, - {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: -1}, expectedN: DefaultN}, + {Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5}, + {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN}, {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN}, {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN}, } @@ -65,7 +65,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } tests := []struct { - n int + n float64 expected int minPullTime int64 }{ @@ -80,7 +80,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } for _, tt := range tests { - e.T().Run(strconv.Itoa(tt.n), func(t *testing.T) { + e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) { sut := New(map[string]rule.Parameter{ParameterN: tt.n}) result, err := sut.Process(data) diff --git a/src/pkg/retention/policy/rule/daysps/evaluator.go b/src/pkg/retention/policy/rule/daysps/evaluator.go index 2c121dde7..ee4dd436d 100644 --- a/src/pkg/retention/policy/rule/daysps/evaluator.go +++ b/src/pkg/retention/policy/rule/daysps/evaluator.go @@ -58,12 +58,13 @@ func (e *evaluator) Action() string { func New(params 
rule.Parameters) rule.Evaluator { if params != nil { if p, ok := params[ParameterN]; ok { - if v, ok := p.(int); ok && v >= 0 { - return &evaluator{n: v} + if v, ok := p.(float64); ok && v >= 0 { + return &evaluator{n: int(v)} } } } - log.Debugf("default parameter %d used for rule %s", DefaultN, TemplateID) + log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID) + return &evaluator{n: DefaultN} } diff --git a/src/pkg/retention/policy/rule/daysps/evaluator_test.go b/src/pkg/retention/policy/rule/daysps/evaluator_test.go index 07c9f9bdd..75287ce4f 100644 --- a/src/pkg/retention/policy/rule/daysps/evaluator_test.go +++ b/src/pkg/retention/policy/rule/daysps/evaluator_test.go @@ -15,7 +15,7 @@ package daysps import ( - "strconv" + "fmt" "testing" "time" @@ -36,8 +36,8 @@ func (e *EvaluatorTestSuite) TestNew() { args rule.Parameters expectedN int }{ - {Name: "Valid", args: map[string]rule.Parameter{ParameterN: 5}, expectedN: 5}, - {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: -1}, expectedN: DefaultN}, + {Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5}, + {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN}, {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN}, {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN}, } @@ -65,7 +65,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } tests := []struct { - n int + n float64 expected int minPushTime int64 }{ @@ -80,7 +80,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } for _, tt := range tests { - e.T().Run(strconv.Itoa(tt.n), func(t *testing.T) { + e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) { sut := New(map[string]rule.Parameter{ParameterN: tt.n}) result, err := sut.Process(data) diff --git a/src/pkg/retention/policy/rule/lastx/evaluator.go 
b/src/pkg/retention/policy/rule/lastx/evaluator.go index 6d98c5c2d..b466f5eda 100644 --- a/src/pkg/retention/policy/rule/lastx/evaluator.go +++ b/src/pkg/retention/policy/rule/lastx/evaluator.go @@ -59,15 +59,15 @@ func (e *evaluator) Action() string { func New(params rule.Parameters) rule.Evaluator { if params != nil { if param, ok := params[ParameterX]; ok { - if v, ok := param.(int); ok && v >= 0 { + if v, ok := param.(float64); ok && v >= 0 { return &evaluator{ - x: v, + x: int(v), } } } } - log.Debugf("default parameter %d used for rule %s", DefaultX, TemplateID) + log.Warningf("default parameter %d used for rule %s", DefaultX, TemplateID) return &evaluator{ x: DefaultX, diff --git a/src/pkg/retention/policy/rule/lastx/evaluator_test.go b/src/pkg/retention/policy/rule/lastx/evaluator_test.go index eafc30f2f..becd79234 100644 --- a/src/pkg/retention/policy/rule/lastx/evaluator_test.go +++ b/src/pkg/retention/policy/rule/lastx/evaluator_test.go @@ -21,8 +21,8 @@ func (e *EvaluatorTestSuite) TestNew() { args rule.Parameters expectedX int }{ - {Name: "Valid", args: map[string]rule.Parameter{ParameterX: 3}, expectedX: 3}, - {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterX: -3}, expectedX: DefaultX}, + {Name: "Valid", args: map[string]rule.Parameter{ParameterX: float64(3)}, expectedX: 3}, + {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterX: float64(-3)}, expectedX: DefaultX}, {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedX: DefaultX}, {Name: "Default If Wrong Type", args: map[string]rule.Parameter{}, expectedX: DefaultX}, } @@ -48,7 +48,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } tests := []struct { - days int + days float64 expected int }{ {days: 0, expected: 0}, @@ -62,7 +62,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } for _, tt := range tests { - e.T().Run(fmt.Sprintf("%d days - should keep %d", tt.days, tt.expected), func(t *testing.T) { + e.T().Run(fmt.Sprintf("%v days - 
should keep %d", tt.days, tt.expected), func(t *testing.T) { e := New(rule.Parameters{ParameterX: tt.days}) result, err := e.Process(data) diff --git a/src/pkg/retention/policy/rule/latestk/evaluator.go b/src/pkg/retention/policy/rule/latestk/evaluator.go index bb1d246de..f6d73599a 100644 --- a/src/pkg/retention/policy/rule/latestk/evaluator.go +++ b/src/pkg/retention/policy/rule/latestk/evaluator.go @@ -65,9 +65,9 @@ func (e *evaluator) Action() string { func New(params rule.Parameters) rule.Evaluator { if params != nil { if param, ok := params[ParameterK]; ok { - if v, ok := param.(int); ok && v >= 0 { + if v, ok := param.(float64); ok && v >= 0 { return &evaluator{ - k: v, + k: int(v), } } } diff --git a/src/pkg/retention/policy/rule/latestk/evaluator_test.go b/src/pkg/retention/policy/rule/latestk/evaluator_test.go index ab2967f51..24b04fb9e 100644 --- a/src/pkg/retention/policy/rule/latestk/evaluator_test.go +++ b/src/pkg/retention/policy/rule/latestk/evaluator_test.go @@ -15,7 +15,7 @@ package latestk import ( - "strconv" + "fmt" "testing" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" @@ -58,7 +58,7 @@ func (e *EvaluatorTestSuite) TestProcess() { {k: 99, expected: len(e.artifacts)}, } for _, tt := range tests { - e.T().Run(strconv.Itoa(tt.k), func(t *testing.T) { + e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) { sut := &evaluator{k: tt.k} result, err := sut.Process(e.artifacts) @@ -79,8 +79,8 @@ func (e *EvaluatorTestSuite) TestNew() { params rule.Parameters expectedK int }{ - {name: "Valid", params: rule.Parameters{ParameterK: 5}, expectedK: 5}, - {name: "Default If Negative", params: rule.Parameters{ParameterK: -5}, expectedK: DefaultK}, + {name: "Valid", params: rule.Parameters{ParameterK: float64(5)}, expectedK: 5}, + {name: "Default If Negative", params: rule.Parameters{ParameterK: float64(-5)}, expectedK: DefaultK}, {name: "Default If Wrong Type", params: rule.Parameters{ParameterK: "5"}, expectedK: DefaultK}, {name: "Default If 
Wrong Key", params: rule.Parameters{"n": 5}, expectedK: DefaultK}, {name: "Default If Empty", params: rule.Parameters{}, expectedK: DefaultK}, diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator.go b/src/pkg/retention/policy/rule/latestpl/evaluator.go index 620790a73..bed7b6e4e 100644 --- a/src/pkg/retention/policy/rule/latestpl/evaluator.go +++ b/src/pkg/retention/policy/rule/latestpl/evaluator.go @@ -59,13 +59,13 @@ func (e *evaluator) Action() string { func New(params rule.Parameters) rule.Evaluator { if params != nil { if p, ok := params[ParameterN]; ok { - if v, ok := p.(int); ok && v >= 0 { - return &evaluator{n: v} + if v, ok := p.(float64); ok && v >= 0 { + return &evaluator{n: int(v)} } } } - log.Debugf("default parameter %d used for rule %s", DefaultN, TemplateID) + log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID) return &evaluator{n: DefaultN} } diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator_test.go b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go index 443481649..69b0605f5 100644 --- a/src/pkg/retention/policy/rule/latestpl/evaluator_test.go +++ b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go @@ -15,8 +15,8 @@ package latestpl import ( + "fmt" "math/rand" - "strconv" "testing" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" @@ -35,8 +35,8 @@ func (e *EvaluatorTestSuite) TestNew() { args rule.Parameters expectedK int }{ - {Name: "Valid", args: map[string]rule.Parameter{ParameterN: 5}, expectedK: 5}, - {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: -1}, expectedK: DefaultN}, + {Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedK: 5}, + {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedK: DefaultN}, {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultN}, {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, 
expectedK: DefaultN}, } @@ -57,7 +57,7 @@ func (e *EvaluatorTestSuite) TestProcess() { }) tests := []struct { - n int + n float64 expected int minPullTime int64 }{ @@ -69,7 +69,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } for _, tt := range tests { - e.T().Run(strconv.Itoa(tt.n), func(t *testing.T) { + e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) { ev := New(map[string]rule.Parameter{ParameterN: tt.n}) result, err := ev.Process(data) diff --git a/src/pkg/retention/policy/rule/latestps/evaluator.go b/src/pkg/retention/policy/rule/latestps/evaluator.go index 8ac090b3f..ac000a302 100644 --- a/src/pkg/retention/policy/rule/latestps/evaluator.go +++ b/src/pkg/retention/policy/rule/latestps/evaluator.go @@ -62,15 +62,15 @@ func (e *evaluator) Action() string { func New(params rule.Parameters) rule.Evaluator { if params != nil { if param, ok := params[ParameterK]; ok { - if v, ok := param.(int); ok && v >= 0 { + if v, ok := param.(float64); ok && v >= 0 { return &evaluator{ - k: v, + k: int(v), } } } } - log.Debugf("default parameter %d used for rule %s", DefaultK, TemplateID) + log.Warningf("default parameter %d used for rule %s", DefaultK, TemplateID) return &evaluator{ k: DefaultK, diff --git a/src/pkg/retention/policy/rule/latestps/evaluator_test.go b/src/pkg/retention/policy/rule/latestps/evaluator_test.go index 7136b69d6..6e303c3c4 100644 --- a/src/pkg/retention/policy/rule/latestps/evaluator_test.go +++ b/src/pkg/retention/policy/rule/latestps/evaluator_test.go @@ -1,8 +1,8 @@ package latestps import ( + "fmt" "math/rand" - "strconv" "testing" "github.com/stretchr/testify/suite" @@ -22,8 +22,8 @@ func (e *EvaluatorTestSuite) TestNew() { args rule.Parameters expectedK int }{ - {Name: "Valid", args: map[string]rule.Parameter{ParameterK: 5}, expectedK: 5}, - {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterK: -1}, expectedK: DefaultK}, + {Name: "Valid", args: map[string]rule.Parameter{ParameterK: float64(5)}, expectedK: 5}, + 
{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterK: float64(-1)}, expectedK: DefaultK}, {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultK}, {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterK: "foo"}, expectedK: DefaultK}, } @@ -44,7 +44,7 @@ func (e *EvaluatorTestSuite) TestProcess() { }) tests := []struct { - k int + k float64 expected int }{ {k: 0, expected: 0}, @@ -55,7 +55,7 @@ func (e *EvaluatorTestSuite) TestProcess() { } for _, tt := range tests { - e.T().Run(strconv.Itoa(tt.k), func(t *testing.T) { + e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) { e := New(map[string]rule.Parameter{ParameterK: tt.k}) result, err := e.Process(data) diff --git a/src/portal/lib/src/config/config.ts b/src/portal/lib/src/config/config.ts index 23078e106..9505b11c5 100644 --- a/src/portal/lib/src/config/config.ts +++ b/src/portal/lib/src/config/config.ts @@ -87,6 +87,7 @@ export class Configuration { token_expiration: NumberValueItem; scan_all_policy: ComplexValueItem; read_only: BoolValueItem; + notification_enable: BoolValueItem; http_authproxy_endpoint?: StringValueItem; http_authproxy_tokenreview_endpoint?: StringValueItem; http_authproxy_verify_cert?: BoolValueItem; @@ -140,6 +141,7 @@ export class Configuration { } }, true); this.read_only = new BoolValueItem(false, true); + this.notification_enable = new BoolValueItem(false, true); this.http_authproxy_endpoint = new StringValueItem("", true); this.http_authproxy_tokenreview_endpoint = new StringValueItem("", true); this.http_authproxy_verify_cert = new BoolValueItem(false, true); diff --git a/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts b/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts index dfcc0c4e8..2a8c55c18 100644 --- a/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts +++ b/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts @@ -4,6 +4,7 @@ import { GcJobViewModel } from 
"../gcLog"; import { GcViewModelFactory } from "../gc.viewmodel.factory"; import { ErrorHandler } from "../../../error-handler/index"; import { Subscription, timer } from "rxjs"; +import { REFRESH_TIME_DIFFERENCE } from '../../../shared/shared.const'; const JOB_STATUS = { PENDING: "pending", RUNNING: "running" @@ -34,7 +35,7 @@ export class GcHistoryComponent implements OnInit, OnDestroy { this.loading = false; // to avoid some jobs not finished. if (!this.timerDelay) { - this.timerDelay = timer(3000, 3000).subscribe(() => { + this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => { let count: number = 0; this.jobs.forEach(job => { if ( diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html index 98e57589a..c9bd48440 100644 --- a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html +++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html @@ -25,8 +25,8 @@ {{'PROJECT.QUOTA_UNLIMIT_TIP' | translate }}
-
+
@@ -60,8 +60,9 @@ {{'PROJECT.QUOTA_UNLIMIT_TIP' | translate }}
-
- +
diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss index 024a058d6..0baa3e354 100644 --- a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss +++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss @@ -1,21 +1,25 @@ - ::ng-deep .modal-dialog { width: 25rem; } + .modal-body { padding-top: 0.8rem; overflow-y: visible; overflow-x: visible; + .clr-form-compact { div.form-group { padding-left: 8.5rem; + .mr-3px { margin-right: 3px; } + .quota-input { width: 2rem; padding-right: 0.8rem; } + .select-div { width: 2.5rem; @@ -51,6 +55,22 @@ width: 9rem; } +::ng-deep { + .progress { + &.warning>progress { + color: orange; + + &::-webkit-progress-value { + background-color: orange; + } + + &::-moz-progress-bar { + background-color: orange; + } + } + } +} + .progress-label { position: absolute; right: -2.3rem; diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts index a0e435c91..1a9e6cd4e 100644 --- a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts +++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts @@ -6,13 +6,10 @@ import { OnInit, } from '@angular/core'; import { NgForm, Validators } from '@angular/forms'; -import { ActivatedRoute } from "@angular/router"; - -import { TranslateService } from '@ngx-translate/core'; import { InlineAlertComponent } from '../../../inline-alert/inline-alert.component'; -import { QuotaUnits, QuotaUnlimited } from "../../../shared/shared.const"; +import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from 
"../../../shared/shared.const"; import { clone, getSuitableUnit, getByte, GetIntegerAndUnit, validateLimit } from '../../../utils'; import { EditQuotaQuotaInterface, QuotaHardLimitInterface } from '../../../service'; @@ -47,9 +44,9 @@ export class EditProjectQuotasComponent implements OnInit { @ViewChild('quotaForm') currentForm: NgForm; @Output() confirmAction = new EventEmitter(); - constructor( - private translateService: TranslateService, - private route: ActivatedRoute) { } + quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT; + quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT; + constructor() { } ngOnInit() { } @@ -134,10 +131,18 @@ export class EditProjectQuotasComponent implements OnInit { } return 0; } - getDangerStyle(limit: number | string, used: number | string, unit?: string) { + isDangerColor(limit: number | string, used: number | string, unit?: string) { if (unit) { - return limit !== QuotaUnlimited ? +used / getByte(+limit, unit) > 0.9 : false; + return limit !== QuotaUnlimited ? +used / getByte(+limit, unit) >= this.quotaDangerCoefficient : false; } - return limit !== QuotaUnlimited ? +used / +limit > 0.9 : false; + return limit !== QuotaUnlimited ? +used / +limit >= this.quotaDangerCoefficient : false; + } + isWarningColor(limit: number | string, used: number | string, unit?: string) { + if (unit) { + return limit !== QuotaUnlimited ? + +used / getByte(+limit, unit) >= this.quotaWarningCoefficient && +used / getByte(+limit, unit) <= this.quotaDangerCoefficient : false; + } + return limit !== QuotaUnlimited ? 
+ +used / +limit >= this.quotaWarningCoefficient && +used / +limit <= this.quotaDangerCoefficient : false; } } diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.html b/src/portal/lib/src/config/project-quotas/project-quotas.component.html index 6f9e9cad2..22af1333d 100644 --- a/src/portal/lib/src/config/project-quotas/project-quotas.component.html +++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.html @@ -37,7 +37,9 @@
+ [class.danger]="quota.hard.count!==-1?quota.used.count/quota.hard.count>quotaDangerCoefficient:false" + [class.warning]="quota.hard.count!==-1?quota.used.count/quota.hard.count<=quotaDangerCoefficient &&quota.used.count/quota.hard.count>=quotaWarningCoefficient:false" + >
@@ -48,7 +50,9 @@
+ [class.danger]="quota.hard.storage!==-1?quota.used.storage/quota.hard.storage>quotaDangerCoefficient:false" + [class.warning]="quota.hard.storage!==-1?quota.used.storage/quota.hard.storage>=quotaWarningCoefficient&&quota.used.storage/quota.hard.storage<=quotaDangerCoefficient:false" + >
diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.ts b/src/portal/lib/src/config/project-quotas/project-quotas.component.ts index df5effd32..fa457b03e 100644 --- a/src/portal/lib/src/config/project-quotas/project-quotas.component.ts +++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.ts @@ -8,7 +8,7 @@ import { , getByte, GetIntegerAndUnit } from '../../utils'; import { ErrorHandler } from '../../error-handler/index'; -import { QuotaUnits, QuotaUnlimited } from '../../shared/shared.const'; +import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from '../../shared/shared.const'; import { EditProjectQuotasComponent } from './edit-project-quotas/edit-project-quotas.component'; import { ConfigurationService @@ -46,6 +46,8 @@ export class ProjectQuotasComponent implements OnChanges { currentPage = 1; totalCount = 0; pageSize = 15; + quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT; + quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT; @Input() get allConfig(): Configuration { return this.config; diff --git a/src/portal/lib/src/config/system/system-settings.component.html b/src/portal/lib/src/config/system/system-settings.component.html index 803348995..82f83f024 100644 --- a/src/portal/lib/src/config/system/system-settings.component.html +++ b/src/portal/lib/src/config/system/system-settings.component.html @@ -142,9 +142,21 @@
-
+
+ + + + + +
diff --git a/src/portal/lib/src/config/system/system-settings.component.ts b/src/portal/lib/src/config/system/system-settings.component.ts index b7a2f2614..f87779109 100644 --- a/src/portal/lib/src/config/system/system-settings.component.ts +++ b/src/portal/lib/src/config/system/system-settings.component.ts @@ -108,7 +108,7 @@ export class SystemSettingsComponent implements OnChanges, OnInit { let changes = {}; for (let prop in allChanges) { if (prop === 'token_expiration' || prop === 'read_only' || prop === 'project_creation_restriction' - || prop === 'robot_token_duration') { + || prop === 'robot_token_duration' || prop === 'notification_enable') { changes[prop] = allChanges[prop]; } } @@ -119,6 +119,10 @@ export class SystemSettingsComponent implements OnChanges, OnInit { this.systemSettings.read_only.value = $event; } + setWebhookNotificationEnabledValue($event: any) { + this.systemSettings.notification_enable.value = $event; + } + disabled(prop: any): boolean { return !(prop && prop.editable); } diff --git a/src/portal/lib/src/confirmation-dialog/confirmation-dialog.component.html b/src/portal/lib/src/confirmation-dialog/confirmation-dialog.component.html index 5daa0b16f..343001220 100644 --- a/src/portal/lib/src/confirmation-dialog/confirmation-dialog.component.html +++ b/src/portal/lib/src/confirmation-dialog/confirmation-dialog.component.html @@ -23,10 +23,18 @@ + + + + + + + + - + diff --git a/src/portal/lib/src/log/recent-log.component.spec.ts b/src/portal/lib/src/log/recent-log.component.spec.ts index 39425c8b2..bdb7cc4c0 100644 --- a/src/portal/lib/src/log/recent-log.component.spec.ts +++ b/src/portal/lib/src/log/recent-log.component.spec.ts @@ -203,7 +203,7 @@ describe('RecentLogComponent (inline template)', () => { fixture.detectChanges(); expect(component.recentLogs).toBeTruthy(); expect(component.logsCache).toBeTruthy(); - expect(component.recentLogs.length).toEqual(3); + expect(component.recentLogs.length).toEqual(15); }); }); diff --git 
a/src/portal/lib/src/log/recent-log.component.ts b/src/portal/lib/src/log/recent-log.component.ts index b791e7bfb..8a187c9df 100644 --- a/src/portal/lib/src/log/recent-log.component.ts +++ b/src/portal/lib/src/log/recent-log.component.ts @@ -67,7 +67,8 @@ export class RecentLogComponent implements OnInit { } public doFilter(terms: string): void { - if (!terms) { + // allow search by null characters + if (terms === undefined || terms === null) { return; } this.currentTerm = terms.trim(); diff --git a/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html b/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html index d441c799b..633f020e6 100644 --- a/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html +++ b/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html @@ -92,7 +92,7 @@
- + {{'REPLICATION.TASK_ID'| translate}} {{'REPLICATION.RESOURCE_TYPE' | translate}} {{'REPLICATION.SOURCE' | translate}} @@ -102,7 +102,7 @@ {{'REPLICATION.CREATION_TIME' | translate}} {{'REPLICATION.END_TIME' | translate}} {{'REPLICATION.LOGS' | translate}} - + {{t.id}} {{t.resource_type}} {{t.src_resource}} @@ -118,8 +118,8 @@ - {{pagination.firstItem + 1}} - {{pagination.lastItem +1 }} {{'REPLICATION.OF' | translate}} {{pagination.totalItems }} {{'REPLICATION.ITEMS' | translate}} - + {{pagination.firstItem + 1}} - {{pagination.lastItem +1 }} {{'REPLICATION.OF' | translate}} {{totalCount }} {{'REPLICATION.ITEMS' | translate}} + diff --git a/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.ts b/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.ts index a9984d47f..d80320319 100644 --- a/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.ts +++ b/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.ts @@ -6,8 +6,9 @@ import { finalize } from "rxjs/operators"; import { Subscription, timer } from "rxjs"; import { ErrorHandler } from "../../error-handler/error-handler"; import { ReplicationJob, ReplicationTasks, Comparator, ReplicationJobItem, State } from "../../service/interface"; -import { CustomComparator, DEFAULT_PAGE_SIZE, calculatePage, doFiltering, doSorting } from "../../utils"; +import { CustomComparator, DEFAULT_PAGE_SIZE } from "../../utils"; import { RequestQueryParams } from "../../service/RequestQueryParams"; +import { REFRESH_TIME_DIFFERENCE } from '../../shared/shared.const'; const executionStatus = 'InProgress'; @Component({ selector: 'replication-tasks', @@ -18,8 +19,8 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy { isOpenFilterTag: boolean; inProgress: boolean = false; currentPage: number = 1; - selectedRow: []; pageSize: number = DEFAULT_PAGE_SIZE; + totalCount: number; loading = true; searchTask: string; defaultFilter 
= "resource_type"; @@ -47,7 +48,6 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy { ngOnInit(): void { this.searchTask = ''; this.getExecutionDetail(); - this.clrLoadTasks(); } getExecutionDetail(): void { @@ -67,14 +67,17 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy { clrLoadPage(): void { if (!this.timerDelay) { - this.timerDelay = timer(10000, 10000).subscribe(() => { + this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => { let count: number = 0; - if (this.executions['status'] === executionStatus) { - count++; - } + if (this.executions['status'] === executionStatus) { + count++; + } if (count > 0) { this.getExecutionDetail(); - this.clrLoadTasks(); + let state: State = { + page: {} + }; + this.clrLoadTasks(state); } else { this.timerDelay.unsubscribe(); this.timerDelay = null; @@ -136,16 +139,30 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy { } } - clrLoadTasks(): void { - this.loading = true; + clrLoadTasks(state: State): void { + if (!state || !state.page || !this.executionId) { + return; + } + let params: RequestQueryParams = new RequestQueryParams(); + params = params.set('page_size', this.pageSize + '').set('page', this.currentPage + ''); if (this.searchTask && this.searchTask !== "") { params = params.set(this.defaultFilter, this.searchTask); } + + this.loading = true; this.replicationService.getReplicationTasks(this.executionId, params) - .pipe(finalize(() => (this.loading = false))) + .pipe(finalize(() => { + this.loading = false; + })) .subscribe(res => { - this.tasks = res; // Keep the data + if (res.headers) { + let xHeader: string = res.headers.get("X-Total-Count"); + if (xHeader) { + this.totalCount = parseInt(xHeader, 0); + } + } + this.tasks = res.body; // Keep the data }, error => { this.errorHandler.error(error); @@ -162,23 +179,20 @@ export class ReplicationTasksComponent implements OnInit, OnDestroy { // refresh icon refreshTasks(): 
void { - this.loading = true; this.currentPage = 1; - this.replicationService.getReplicationTasks(this.executionId) - .subscribe(res => { - this.tasks = res; - this.loading = false; - }, - error => { - this.loading = false; - this.errorHandler.error(error); - }); + let state: State = { + page: {} + }; + this.clrLoadTasks(state); } public doSearch(value: string): void { + this.currentPage = 1; this.searchTask = value.trim(); - this.loading = true; - this.clrLoadTasks(); + let state: State = { + page: {} + }; + this.clrLoadTasks(state); } openFilter(isOpen: boolean): void { diff --git a/src/portal/lib/src/replication/replication.component.ts b/src/portal/lib/src/replication/replication.component.ts index e211af9ea..1bdbfe200 100644 --- a/src/portal/lib/src/replication/replication.component.ts +++ b/src/portal/lib/src/replication/replication.component.ts @@ -48,7 +48,8 @@ import { import { ConfirmationTargets, ConfirmationButtons, - ConfirmationState + ConfirmationState, + REFRESH_TIME_DIFFERENCE } from "../shared/shared.const"; import { ConfirmationMessage } from "../confirmation-dialog/confirmation-message"; import { ConfirmationDialogComponent } from "../confirmation-dialog/confirmation-dialog.component"; @@ -214,7 +215,7 @@ export class ReplicationComponent implements OnInit, OnDestroy { this.totalCount = response.metadata.xTotalCount; this.jobs = response.data; if (!this.timerDelay) { - this.timerDelay = timer(10000, 10000).subscribe(() => { + this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => { let count: number = 0; this.jobs.forEach(job => { if ( diff --git a/src/portal/lib/src/service/interface.ts b/src/portal/lib/src/service/interface.ts index 5de814f2b..17e739d91 100644 --- a/src/portal/lib/src/service/interface.ts +++ b/src/portal/lib/src/service/interface.ts @@ -66,6 +66,8 @@ export interface Tag extends Base { signature?: string; scan_overview?: VulnerabilitySummary; labels: Label[]; + push_time?: string; + 
pull_time?: string; } /** diff --git a/src/portal/lib/src/service/permission-static.ts b/src/portal/lib/src/service/permission-static.ts index 2aab585bb..42518be03 100644 --- a/src/portal/lib/src/service/permission-static.ts +++ b/src/portal/lib/src/service/permission-static.ts @@ -144,5 +144,12 @@ export const USERSTATICPERMISSION = { "PUSH": "push" } }, + "WEBHOOK": { + "KEY": "notification-policy", + "VALUE": { + "LIST": "list", + "READ": "read", + } + }, }; diff --git a/src/portal/lib/src/service/replication.service.ts b/src/portal/lib/src/service/replication.service.ts index 135f04b8c..3da31077f 100644 --- a/src/portal/lib/src/service/replication.service.ts +++ b/src/portal/lib/src/service/replication.service.ts @@ -296,8 +296,7 @@ export class ReplicationDefaultService extends ReplicationService { } let url: string = `${this._replicateUrl}/executions/${executionId}/tasks`; return this.http - .get(url, - queryParams ? buildHttpRequestOptions(queryParams) : HTTP_GET_OPTIONS) + .get(url, buildHttpRequestOptionsWithObserveResponse(queryParams)) .pipe(map(response => response as ReplicationTasks) , catchError(error => observableThrowError(error))); } diff --git a/src/portal/lib/src/service/tag.service.ts b/src/portal/lib/src/service/tag.service.ts index 9020200f3..336fa2369 100644 --- a/src/portal/lib/src/service/tag.service.ts +++ b/src/portal/lib/src/service/tag.service.ts @@ -140,7 +140,7 @@ export class TagDefaultService extends TagService { queryParams = queryParams = new RequestQueryParams(); } - queryParams = queryParams.set("detail", "1"); + queryParams = queryParams.set("detail", "true"); let url: string = `${this._baseUrl}/${repositoryName}/tags`; return this.http diff --git a/src/portal/lib/src/shared/shared.const.ts b/src/portal/lib/src/shared/shared.const.ts index db48e9354..3152f3e5d 100644 --- a/src/portal/lib/src/shared/shared.const.ts +++ b/src/portal/lib/src/shared/shared.const.ts @@ -69,7 +69,7 @@ export const FilterType = { }; export const enum 
ConfirmationButtons { - CONFIRM_CANCEL, YES_NO, DELETE_CANCEL, CLOSE, REPLICATE_CANCEL, STOP_CANCEL + CONFIRM_CANCEL, YES_NO, DELETE_CANCEL, CLOSE, ENABLE_CANCEL, DISABLE_CANCEL, REPLICATE_CANCEL, STOP_CANCEL } export const QuotaUnits = [ { @@ -122,6 +122,8 @@ export const CONFIG_AUTH_MODE = { OIDC_AUTH: "oidc_auth", UAA_AUTH: "uaa_auth" }; +export const QUOTA_DANGER_COEFFICIENT = 0.9; +export const QUOTA_WARNING_COEFFICIENT = 0.7; export const PROJECT_ROOTS = [ { NAME: "admin", @@ -149,3 +151,4 @@ export enum GroupType { LDAP_TYPE = 1, HTTP_TYPE = 2 } +export const REFRESH_TIME_DIFFERENCE = 10000; diff --git a/src/portal/lib/src/tag/tag.component.html b/src/portal/lib/src/tag/tag.component.html index 30e1af20d..1c705be9e 100644 --- a/src/portal/lib/src/tag/tag.component.html +++ b/src/portal/lib/src/tag/tag.component.html @@ -85,6 +85,8 @@ {{'REPOSITORY.CREATED' | translate}} {{'REPOSITORY.DOCKER_VERSION' | translate}} {{'REPOSITORY.LABELS' | translate}} + {{'REPOSITORY.PUSH_TIME' | translate}} + {{'REPOSITORY.PULL_TIME' | translate}} {{'TAG.PLACEHOLDER' | translate }} @@ -123,6 +125,8 @@ + {{t.push_time | date: 'short'}} + {{t.pull_time | date: 'short'}} {{pagination.firstItem + 1}} - {{pagination.lastItem + 1}} {{'REPOSITORY.OF' | translate}} {{pagination.totalItems}} {{'REPOSITORY.ITEMS' | translate}}     diff --git a/src/portal/lib/src/tag/tag.component.ts b/src/portal/lib/src/tag/tag.component.ts index b74f790c8..71eec3b96 100644 --- a/src/portal/lib/src/tag/tag.component.ts +++ b/src/portal/lib/src/tag/tag.component.ts @@ -66,7 +66,7 @@ export interface LabelState { label: Label; show: boolean; } - +export const AVAILABLE_TIME = '0001-01-01T00:00:00Z'; @Component({ selector: 'hbr-tag', templateUrl: './tag.component.html', @@ -271,7 +271,10 @@ export class TagComponent implements OnInit, AfterViewInit { // Do filtering and sorting this.tags = doFiltering(tags, state); this.tags = doSorting(this.tags, state); - + this.tags = this.tags.map(tag => { + 
tag.push_time = tag.push_time === AVAILABLE_TIME ? '' : tag.push_time; + return tag; + }); this.loading = false; }, error => { this.loading = false; @@ -539,7 +542,10 @@ export class TagComponent implements OnInit, AfterViewInit { signatures.push(t.name); } }); - this.tags = items; + this.tags = items.map(tag => { + tag.push_time = tag.push_time === AVAILABLE_TIME ? '' : tag.push_time; + return tag; + }); let signedName: { [key: string]: string[] } = {}; signedName[this.repoName] = signatures; this.signatureOutput.emit(signedName); diff --git a/src/portal/lib/src/vulnerability-scanning/result-bar-chart.component.ts b/src/portal/lib/src/vulnerability-scanning/result-bar-chart.component.ts index d4648a6ce..947ba031c 100644 --- a/src/portal/lib/src/vulnerability-scanning/result-bar-chart.component.ts +++ b/src/portal/lib/src/vulnerability-scanning/result-bar-chart.component.ts @@ -48,8 +48,12 @@ export class ResultBarChartComponent implements OnInit, OnDestroy { ) { } ngOnInit(): void { - if (this.tagStatus === "running") { - this.scanNow(); + if ((this.tagStatus === VULNERABILITY_SCAN_STATUS.running || this.tagStatus === VULNERABILITY_SCAN_STATUS.pending) + && !this.stateCheckTimer) { + // Avoid duplicated subscribing + this.stateCheckTimer = timer(0, STATE_CHECK_INTERVAL).subscribe(() => { + this.getSummary(); + }); } this.scanSubscription = this.channel.scanCommand$.subscribe((tagId: string) => { let myFullTag: string = this.repoName + "/" + this.tagId; diff --git a/src/portal/src/app/app.module.ts b/src/portal/src/app/app.module.ts index 5d0cd92f8..85ad627e4 100644 --- a/src/portal/src/app/app.module.ts +++ b/src/portal/src/app/app.module.ts @@ -37,6 +37,7 @@ import { DevCenterComponent } from './dev-center/dev-center.component'; import { VulnerabilityPageComponent } from './vulnerability-page/vulnerability-page.component'; import { GcPageComponent } from './gc-page/gc-page.component'; import { OidcOnboardModule } from './oidc-onboard/oidc-onboard.module'; +import 
{ LicenseModule } from './license/license.module'; registerLocaleData(zh, 'zh-cn'); registerLocaleData(es, 'es-es'); registerLocaleData(localeFr, 'fr-fr'); @@ -70,7 +71,8 @@ export function getCurrentLanguage(translateService: TranslateService) { HarborRoutingModule, ConfigurationModule, DeveloperCenterModule, - OidcOnboardModule + OidcOnboardModule, + LicenseModule ], exports: [ ], diff --git a/src/portal/src/app/harbor-routing.module.ts b/src/portal/src/app/harbor-routing.module.ts index f5e6588a2..662b51fe9 100644 --- a/src/portal/src/app/harbor-routing.module.ts +++ b/src/portal/src/app/harbor-routing.module.ts @@ -49,6 +49,7 @@ import { ProjectComponent } from './project/project.component'; import { ProjectDetailComponent } from './project/project-detail/project-detail.component'; import { MemberComponent } from './project/member/member.component'; import { RobotAccountComponent } from './project/robot-account/robot-account.component'; +import { WebhookComponent } from './project/webhook/webhook.component'; import { ProjectLabelComponent } from "./project/project-label/project-label.component"; import { ProjectConfigComponent } from './project/project-config/project-config.component'; import { ProjectRoutingResolver } from './project/project-routing-resolver.service'; @@ -56,6 +57,7 @@ import { ListChartsComponent } from './project/helm-chart/list-charts.component' import { ListChartVersionsComponent } from './project/helm-chart/list-chart-versions/list-chart-versions.component'; import { HelmChartDetailComponent } from './project/helm-chart/helm-chart-detail/chart-detail.component'; import { OidcOnboardComponent } from './oidc-onboard/oidc-onboard.component'; +import { LicenseComponent } from './license/license.component'; import { SummaryComponent } from './project/summary/summary.component'; import { TagRetentionComponent } from "./project/tag-retention/tag-retention.component"; @@ -72,6 +74,10 @@ const harborRoutes: Routes = [ component: 
OidcOnboardComponent, canActivate: [OidcGuard, SignInGuard] }, + { + path: 'license', + component: LicenseComponent + }, { path: 'harbor/sign-in', component: SignInComponent, @@ -207,7 +213,11 @@ const harborRoutes: Routes = [ { path: 'tag-retention', component: TagRetentionComponent - } + }, + { + path: 'webhook', + component: WebhookComponent + }, ] }, { diff --git a/src/portal/src/app/license/license.component.html b/src/portal/src/app/license/license.component.html new file mode 100644 index 000000000..caa00cb9e --- /dev/null +++ b/src/portal/src/app/license/license.component.html @@ -0,0 +1 @@ +
{{licenseContent}}
diff --git a/src/portal/src/app/license/license.component.scss b/src/portal/src/app/license/license.component.scss new file mode 100644 index 000000000..6d00fe18b --- /dev/null +++ b/src/portal/src/app/license/license.component.scss @@ -0,0 +1,8 @@ +.license { + display: block; + font-family: monospace; + word-wrap: break-word; + white-space: pre-wrap; + margin: 1em 0px; + font-size: 1rem; +} \ No newline at end of file diff --git a/src/portal/src/app/license/license.component.spec.ts b/src/portal/src/app/license/license.component.spec.ts new file mode 100644 index 000000000..f1d41ee71 --- /dev/null +++ b/src/portal/src/app/license/license.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { LicenseComponent } from './license.component'; + +describe('LicenseComponent', () => { + let component: LicenseComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ LicenseComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(LicenseComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/license/license.component.ts b/src/portal/src/app/license/license.component.ts new file mode 100644 index 000000000..218b2492a --- /dev/null +++ b/src/portal/src/app/license/license.component.ts @@ -0,0 +1,25 @@ +import { Component, OnInit } from '@angular/core'; +import { HttpClient } from '@angular/common/http'; +import { throwError as observableThrowError } from 'rxjs'; +import { catchError } from 'rxjs/operators'; +import { Title } from '@angular/platform-browser'; +@Component({ + selector: 'app-license', + viewProviders: [Title], + templateUrl: './license.component.html', + styleUrls: ['./license.component.scss'] +}) +export class LicenseComponent implements 
OnInit { + + constructor( + private http: HttpClient + ) { } + public licenseContent: any; + ngOnInit() { + this.http.get("/LICENSE", { responseType: 'text'}) + .pipe(catchError(error => observableThrowError(error))) + .subscribe(json => { + this.licenseContent = json; + }); + } +} diff --git a/src/portal/src/app/license/license.module.ts b/src/portal/src/app/license/license.module.ts new file mode 100644 index 000000000..543dcddc4 --- /dev/null +++ b/src/portal/src/app/license/license.module.ts @@ -0,0 +1,11 @@ +import { NgModule } from '@angular/core'; +import { CommonModule } from '@angular/common'; +import { LicenseComponent } from './license.component'; + +@NgModule({ + declarations: [LicenseComponent], + imports: [ + CommonModule + ] +}) +export class LicenseModule { } diff --git a/src/portal/src/app/project/project-detail/project-detail.component.html b/src/portal/src/app/project/project-detail/project-detail.component.html index 410b7a693..ed9dedad3 100644 --- a/src/portal/src/app/project/project-detail/project-detail.component.html +++ b/src/portal/src/app/project/project-detail/project-detail.component.html @@ -28,6 +28,9 @@ + diff --git a/src/portal/src/app/project/project-detail/project-detail.component.ts b/src/portal/src/app/project/project-detail/project-detail.component.ts index c955416d8..8fdc749c5 100644 --- a/src/portal/src/app/project/project-detail/project-detail.component.ts +++ b/src/portal/src/app/project/project-detail/project-detail.component.ts @@ -44,6 +44,7 @@ export class ProjectDetailComponent implements OnInit { hasConfigurationListPermission: boolean; hasRobotListPermission: boolean; hasTagRetentionPermission: boolean; + hasWebhookListPermission: boolean; constructor( private route: ActivatedRoute, private router: Router, @@ -86,11 +87,12 @@ export class ProjectDetailComponent implements OnInit { USERSTATICPERMISSION.LABEL.KEY, USERSTATICPERMISSION.LABEL.VALUE.CREATE)); 
permissionsList.push(this.userPermissionService.getPermission(projectId, USERSTATICPERMISSION.TAG_RETENTION.KEY, USERSTATICPERMISSION.TAG_RETENTION.VALUE.READ)); + permissionsList.push(this.userPermissionService.getPermission(projectId, + USERSTATICPERMISSION.WEBHOOK.KEY, USERSTATICPERMISSION.WEBHOOK.VALUE.LIST)); forkJoin(...permissionsList).subscribe(Rules => { [this.hasProjectReadPermission, this.hasLogListPermission, this.hasConfigurationListPermission, this.hasMemberListPermission , this.hasLabelListPermission, this.hasRepositoryListPermission, this.hasHelmChartsListPermission, this.hasRobotListPermission - , this.hasLabelCreatePermission, this.hasTagRetentionPermission] = Rules; - + , this.hasLabelCreatePermission, this.hasTagRetentionPermission, this.hasWebhookListPermission] = Rules; }, error => this.errorHandler.error(error)); } diff --git a/src/portal/src/app/project/project.module.ts b/src/portal/src/app/project/project.module.ts index bf33c8503..d29d255f0 100644 --- a/src/portal/src/app/project/project.module.ts +++ b/src/portal/src/app/project/project.module.ts @@ -43,7 +43,10 @@ import { AddHttpAuthGroupComponent } from './member/add-http-auth-group/add-http import { TagRetentionComponent } from "./tag-retention/tag-retention.component"; import { AddRuleComponent } from "./tag-retention/add-rule/add-rule.component"; import { TagRetentionService } from "./tag-retention/tag-retention.service"; - +import { WebhookService } from './webhook/webhook.service'; +import { WebhookComponent } from './webhook/webhook.component'; +import { AddWebhookComponent } from './webhook/add-webhook/add-webhook.component'; +import { AddWebhookFormComponent } from './webhook/add-webhook-form/add-webhook-form.component'; @NgModule({ imports: [ @@ -70,9 +73,12 @@ import { TagRetentionService } from "./tag-retention/tag-retention.service"; AddHttpAuthGroupComponent, TagRetentionComponent, AddRuleComponent, + WebhookComponent, + AddWebhookComponent, + AddWebhookFormComponent, ], 
exports: [ProjectComponent, ListProjectComponent], - providers: [ProjectRoutingResolver, MemberService, RobotService, TagRetentionService] + providers: [ProjectRoutingResolver, MemberService, RobotService, TagRetentionService, WebhookService] }) export class ProjectModule { diff --git a/src/portal/src/app/project/summary/summary.component.html b/src/portal/src/app/project/summary/summary.component.html index d53588b42..70d640c1a 100644 --- a/src/portal/src/app/project/summary/summary.component.html +++ b/src/portal/src/app/project/summary/summary.component.html @@ -35,7 +35,8 @@
-
+
@@ -58,7 +59,8 @@
+ [class.danger]="summaryInformation?.quota?.hard?.storage!==-1?summaryInformation?.quota?.used?.storage/summaryInformation?.quota?.hard?.storage>quotaDangerCoefficient:false" + [class.warning]="summaryInformation?.quota?.hard?.storage!==-1?summaryInformation?.quota?.used?.storage/summaryInformation?.quota?.hard?.storage<=quotaDangerCoefficient&&summaryInformation?.quota?.used?.storage/summaryInformation?.quota?.hard?.storage>=quotaWarningCoefficient:false"> diff --git a/src/portal/src/app/project/summary/summary.component.ts b/src/portal/src/app/project/summary/summary.component.ts index 00bdaf146..457d7fbb2 100644 --- a/src/portal/src/app/project/summary/summary.component.ts +++ b/src/portal/src/app/project/summary/summary.component.ts @@ -1,9 +1,9 @@ -import { Component, OnInit, Input } from '@angular/core'; -import { ProjectService, clone, QuotaUnits, getSuitableUnit, ErrorHandler, GetIntegerAndUnit } from '@harbor/ui'; -import { Router, ActivatedRoute } from '@angular/router'; -import { forkJoin } from 'rxjs'; +import { Component, OnInit } from '@angular/core'; +import { ProjectService, clone, QuotaUnits, getSuitableUnit, ErrorHandler, GetIntegerAndUnit + , QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from '@harbor/ui'; +import { ActivatedRoute } from '@angular/router'; + import { AppConfigService } from "../../app-config.service"; -export const riskRatio = 0.9; @Component({ selector: 'summary', templateUrl: './summary.component.html', @@ -12,6 +12,8 @@ export const riskRatio = 0.9; export class SummaryComponent implements OnInit { projectId: number; summaryInformation: any; + quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT; + quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT; constructor( private projectService: ProjectService, private errorHandler: ErrorHandler, diff --git a/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.html 
b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.html new file mode 100644 index 000000000..29073e9be --- /dev/null +++ b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.html @@ -0,0 +1,46 @@ +
+
+
+ +
+ + +
+ +
+ + +
+ +
+ + + + + + {{'CONFIG.TOOLTIP.VERIFY_REMOTE_CERT' | translate}} + + +
+
+
+
+ + +
+
+ + + +
+
\ No newline at end of file diff --git a/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.scss b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.scss new file mode 100644 index 000000000..a91cb90b1 --- /dev/null +++ b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.scss @@ -0,0 +1,12 @@ +.align-center { + text-align: center; +} + +.webhook-section { + margin-left: calc(50% - 10rem); + text-align: left; +} + +.icon-tooltip { + margin-top: 4px; +} \ No newline at end of file diff --git a/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.ts b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.ts new file mode 100644 index 000000000..96d23ecb1 --- /dev/null +++ b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.ts @@ -0,0 +1,112 @@ +import { + Component, + OnInit, + OnChanges, + Input, + ViewChild, + Output, + EventEmitter, + SimpleChanges +} from "@angular/core"; +import { Webhook, Target } from "../webhook"; +import { NgForm } from "@angular/forms"; +import {ClrLoadingState} from "@clr/angular"; +import { finalize } from "rxjs/operators"; +import { WebhookService } from "../webhook.service"; +import { WebhookEventTypes } from '../../../shared/shared.const'; +import { MessageHandlerService } from "../../../shared/message-handler/message-handler.service"; + +@Component({ + selector: 'add-webhook-form', + templateUrl: './add-webhook-form.component.html', + styleUrls: ['./add-webhook-form.component.scss'] +}) +export class AddWebhookFormComponent implements OnInit, OnChanges { + closable: boolean = true; + staticBackdrop: boolean = true; + checking: boolean = false; + checkBtnState: ClrLoadingState = ClrLoadingState.DEFAULT; + webhookForm: NgForm; + submitting: boolean = false; + webhookTarget: Target = new Target(); + + @Input() projectId: number; + @Input() webhook: Webhook; + @Input() 
isModify: boolean; + @Input() isOpen: boolean; + @Output() edit = new EventEmitter(); + @Output() close = new EventEmitter(); + @ViewChild("webhookForm") currentForm: NgForm; + + + constructor( + private webhookService: WebhookService, + private messageHandlerService: MessageHandlerService + ) { } + + ngOnInit() { + } + + ngOnChanges(changes: SimpleChanges) { + if (changes['isOpen'] && changes['isOpen'].currentValue) { + Object.assign(this.webhookTarget, this.webhook.targets[0]); + } + } + + onTestEndpoint() { + this.checkBtnState = ClrLoadingState.LOADING; + this.checking = true; + + this.webhookService + .testEndpoint(this.projectId, { + targets: [this.webhookTarget] + }) + .pipe(finalize(() => (this.checking = false))) + .subscribe( + response => { + this.checkBtnState = ClrLoadingState.SUCCESS; + }, + error => { + this.checkBtnState = ClrLoadingState.DEFAULT; + this.messageHandlerService.handleError(error); + } + ); + } + + onCancel() { + this.close.emit(false); + this.currentForm.reset(); + } + + onSubmit() { + const rx = this.isModify + ? 
this.webhookService.editWebhook(this.projectId, this.webhook.id, Object.assign(this.webhook, { targets: [this.webhookTarget] })) + : this.webhookService.createWebhook(this.projectId, { + targets: [this.webhookTarget], + event_types: Object.keys(WebhookEventTypes).map(key => WebhookEventTypes[key]), + enabled: true, + }); + rx.pipe(finalize(() => (this.submitting = false))) + .subscribe( + response => { + this.edit.emit(this.isModify); + }, + error => { + this.messageHandlerService.handleError(error); + } + ); + } + + setCertValue($event: any): void { + this.webhookTarget.skip_cert_verify = !$event; + } + + public get isValid(): boolean { + return ( + this.currentForm && + this.currentForm.valid && + !this.submitting && + !this.checking + ); + } +} diff --git a/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.html b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.html new file mode 100644 index 000000000..589376278 --- /dev/null +++ b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.html @@ -0,0 +1,13 @@ + + + + \ No newline at end of file diff --git a/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.scss b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.scss new file mode 100644 index 000000000..e69de29bb diff --git a/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.ts b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.ts new file mode 100644 index 000000000..4708d7010 --- /dev/null +++ b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.ts @@ -0,0 +1,49 @@ +import { + Component, + OnInit, + Input, + ViewChild, + Output, + EventEmitter, +} from "@angular/core"; +import { Webhook } from "../webhook"; +import { AddWebhookFormComponent } from "../add-webhook-form/add-webhook-form.component"; + +@Component({ + selector: 'add-webhook', + templateUrl: './add-webhook.component.html', + styleUrls: 
['./add-webhook.component.scss'] +}) +export class AddWebhookComponent implements OnInit { + isOpen: boolean = false; + closable: boolean = true; + staticBackdrop: boolean = true; + + @Input() projectId: number; + @Input() webhook: Webhook; + @Output() modify = new EventEmitter(); + @ViewChild(AddWebhookFormComponent) + addWebhookFormComponent: AddWebhookFormComponent; + + + constructor() { } + + ngOnInit() { + } + + openAddWebhookModal() { + this.isOpen = true; + } + + onCancel() { + this.isOpen = false; + } + + closeModal(isModified: boolean): void { + if (isModified) { + this.modify.emit(true); + } + this.isOpen = false; + } + +} diff --git a/src/portal/src/app/project/webhook/webhook.component.html b/src/portal/src/app/project/webhook/webhook.component.html new file mode 100644 index 000000000..6a8ad2ce5 --- /dev/null +++ b/src/portal/src/app/project/webhook/webhook.component.html @@ -0,0 +1,51 @@ +
+
+
+
+
+
+ Webhook endpoint: {{endpoint}} + +
+
+ + +
+
+
+
+
+ + {{'WEBHOOK.TYPE' | translate}} + {{'WEBHOOK.STATUS' | translate}} + {{'WEBHOOK.CREATED' | translate}} + {{'WEBHOOK.LAST_TRIGGERED' | translate}} + + {{item.event_type}} + +
+ + {{'WEBHOOK.ENABLED' | translate}} +
+
+ + {{'WEBHOOK.DISABLED' | translate}} +
+
+ {{item.creation_time | date: 'short'}} + {{item.last_trigger_time | date: 'short'}} +
+ + 1 - {{lastTriggerCount}} {{'WEBHOOK.OF' | translate}} {{lastTriggerCount}} {{'WEBHOOK.ITEMS' | translate}} + +
+
+
+
+

{{'WEBHOOK.CREATE_WEBHOOK' | translate}}

+

{{'WEBHOOK.CREATE_WEBHOOK_DESC' | translate}}

+ +
+ + +
\ No newline at end of file diff --git a/src/portal/src/app/project/webhook/webhook.component.scss b/src/portal/src/app/project/webhook/webhook.component.scss new file mode 100644 index 000000000..c9254f6e1 --- /dev/null +++ b/src/portal/src/app/project/webhook/webhook.component.scss @@ -0,0 +1,37 @@ +.label-top { + top: 12px; +} + +.icon-wrap { + height: 14px; +} + +.webhook-form-wrap { + width: 19rem; + margin: 0 auto; +} + +.create-text { + margin: 0 auto; + width: 19rem; +} + +.create-text-title { + margin-top: 1rem; +} + +.endpoint-label { + font-weight: bold; +} + +.disabled-btn { + color: #e12200; +} + +.disabled-btn:hover { + color: #c92100; +} + +.enabled-icon { + margin: -2px 5px 0 0; +} \ No newline at end of file diff --git a/src/portal/src/app/project/webhook/webhook.component.ts b/src/portal/src/app/project/webhook/webhook.component.ts new file mode 100644 index 000000000..d19759f46 --- /dev/null +++ b/src/portal/src/app/project/webhook/webhook.component.ts @@ -0,0 +1,154 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+import { finalize } from "rxjs/operators"; +import { TranslateService } from '@ngx-translate/core'; +import { Component, OnInit, ViewChild } from '@angular/core'; +import { AddWebhookComponent } from "./add-webhook/add-webhook.component"; +import { AddWebhookFormComponent } from "./add-webhook-form/add-webhook-form.component"; +import { ActivatedRoute } from '@angular/router'; +import { Webhook, LastTrigger } from './webhook'; +import { WebhookService } from './webhook.service'; +import { MessageHandlerService } from "../../shared/message-handler/message-handler.service"; +import { Project } from '../project'; +import { + ConfirmationTargets, + ConfirmationState, + ConfirmationButtons +} from "../../shared/shared.const"; + +import { ConfirmationMessage } from "../../shared/confirmation-dialog/confirmation-message"; +import { ConfirmationAcknowledgement } from "../../shared/confirmation-dialog/confirmation-state-message"; +import { ConfirmationDialogComponent } from "../../shared/confirmation-dialog/confirmation-dialog.component"; + +@Component({ + templateUrl: './webhook.component.html', + styleUrls: ['./webhook.component.scss'], + // changeDetection: ChangeDetectionStrategy.OnPush +}) +export class WebhookComponent implements OnInit { + @ViewChild(AddWebhookComponent) + addWebhookComponent: AddWebhookComponent; + @ViewChild(AddWebhookFormComponent) + addWebhookFormComponent: AddWebhookFormComponent; + @ViewChild("confirmationDialogComponent") + confirmationDialogComponent: ConfirmationDialogComponent; + webhook: Webhook; + endpoint: string = ''; + lastTriggers: LastTrigger[] = []; + lastTriggerCount: number = 0; + isEnabled: boolean; + loading: boolean = false; + showCreate: boolean = false; + projectId: number; + projectName: string; + constructor( + private route: ActivatedRoute, + private translate: TranslateService, + private webhookService: WebhookService, + private messageHandlerService: MessageHandlerService) {} + + ngOnInit() { + this.projectId = 
+this.route.snapshot.parent.params['id']; + let resolverData = this.route.snapshot.parent.data; + if (resolverData) { + let project = (resolverData["projectResolver"]); + this.projectName = project.name; + } + this.getData(this.projectId); + } + + getData(projectId: number) { + this.getLastTriggers(projectId); + this.getWebhook(projectId); + } + + getLastTriggers(projectId: number) { + this.loading = true; + this.webhookService + .listLastTrigger(projectId) + .pipe(finalize(() => (this.loading = false))) + .subscribe( + response => { + this.lastTriggers = response; + this.lastTriggerCount = response.length; + }, + error => { + this.messageHandlerService.handleError(error); + } + ); + } + + getWebhook(projectId: number) { + this.webhookService + .listWebhook(projectId) + .subscribe( + response => { + if (response.length) { + this.webhook = response[0]; + this.endpoint = this.webhook.targets[0].address; + this.isEnabled = this.webhook.enabled; + this.showCreate = false; + } else { + this.showCreate = true; + } + }, + error => { + this.messageHandlerService.handleError(error); + } + ); + } + + switchWebhookStatus(enabled = false) { + let content = ''; + this.translate.get( + enabled + ? 'WEBHOOK.ENABLED_WEBHOOK_SUMMARY' + : 'WEBHOOK.DISABLED_WEBHOOK_SUMMARY' + ).subscribe((res) => content = res + this.projectName); + let message = new ConfirmationMessage( + enabled ? 'WEBHOOK.ENABLED_WEBHOOK_TITLE' : 'WEBHOOK.DISABLED_WEBHOOK_TITLE', + content, + '', + {}, + ConfirmationTargets.WEBHOOK, + enabled ? 
ConfirmationButtons.ENABLE_CANCEL : ConfirmationButtons.DISABLE_CANCEL + ); + this.confirmationDialogComponent.open(message); + } + + confirmSwitch(message: ConfirmationAcknowledgement) { + if (message && + message.source === ConfirmationTargets.WEBHOOK && + message.state === ConfirmationState.CONFIRMED) { + this.webhookService + .editWebhook(this.projectId, this.webhook.id, Object.assign({}, this.webhook, { enabled: !this.isEnabled })) + .subscribe( + response => { + this.getData(this.projectId); + }, + error => { + this.messageHandlerService.handleError(error); + } + ); + } +} + + editWebhook(isModify: boolean): void { + this.getData(this.projectId); + } + + openAddWebhookModal(): void { + this.addWebhookComponent.openAddWebhookModal(); + } +} diff --git a/src/portal/src/app/project/webhook/webhook.service.ts b/src/portal/src/app/project/webhook/webhook.service.ts new file mode 100644 index 000000000..490942f25 --- /dev/null +++ b/src/portal/src/app/project/webhook/webhook.service.ts @@ -0,0 +1,56 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+import { throwError as observableThrowError, Observable } from "rxjs"; +import { map, catchError } from "rxjs/operators"; +import { Injectable } from "@angular/core"; +import { HttpClient } from "@angular/common/http"; +import { Webhook, LastTrigger } from "./webhook"; + +@Injectable() +export class WebhookService { + constructor(private http: HttpClient) { } + + public listWebhook(projectId: number): Observable { + return this.http + .get(`/api/projects/${projectId}/webhook/policies`) + .pipe(map(response => response as Webhook[])) + .pipe(catchError(error => observableThrowError(error))); + } + + public listLastTrigger(projectId: number): Observable { + return this.http + .get(`/api/projects/${projectId}/webhook/lasttrigger`) + .pipe(map(response => response as LastTrigger[])) + .pipe(catchError(error => observableThrowError(error))); + } + + public editWebhook(projectId: number, policyId: number, data: any): Observable { + return this.http + .put(`/api/projects/${projectId}/webhook/policies/${policyId}`, data) + .pipe(catchError(error => observableThrowError(error))); + } + + public createWebhook(projectId: number, data: any): Observable { + return this.http + .post(`/api/projects/${projectId}/webhook/policies`, data) + .pipe(catchError(error => observableThrowError(error))); + } + + + public testEndpoint(projectId: number, param): Observable { + return this.http + .post(`/api/projects/${projectId}/webhook/policies/test`, param) + .pipe(catchError(error => observableThrowError(error))); + } +} diff --git a/src/portal/src/app/project/webhook/webhook.ts b/src/portal/src/app/project/webhook/webhook.ts new file mode 100644 index 000000000..4d11a8c1c --- /dev/null +++ b/src/portal/src/app/project/webhook/webhook.ts @@ -0,0 +1,35 @@ +import { WebhookEventTypes } from '../../shared/shared.const'; + +export class Webhook { + id: number; + name: string; + project_id: number; + description: string; + targets: Target[]; + event_types: WebhookEventTypes[]; + creator: 
string; + creation_time: Date; + update_time: Date; + enabled: boolean; +} + +export class Target { + type: string; + address: string; + attachment: string; + auth_header: string; + skip_cert_verify: boolean; + + constructor () { + this.type = 'http'; + this.address = ''; + this.skip_cert_verify = true; + } +} + +export class LastTrigger { + enabled: boolean; + event_type: string; + creation_time: Date; + last_trigger_time: Date; +} diff --git a/src/portal/src/app/shared/about-dialog/about-dialog.component.html b/src/portal/src/app/shared/about-dialog/about-dialog.component.html index 3734a76e0..75ec45695 100644 --- a/src/portal/src/app/shared/about-dialog/about-dialog.component.html +++ b/src/portal/src/app/shared/about-dialog/about-dialog.component.html @@ -11,7 +11,7 @@
diff --git a/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html b/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html index e6b6511a4..004d8acf7 100644 --- a/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html +++ b/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html @@ -22,7 +22,15 @@ - + + + + + + + + + diff --git a/src/portal/src/app/shared/shared.const.ts b/src/portal/src/app/shared/shared.const.ts index 2bef75c07..bb3f7146c 100644 --- a/src/portal/src/app/shared/shared.const.ts +++ b/src/portal/src/app/shared/shared.const.ts @@ -39,7 +39,8 @@ export const enum ConfirmationTargets { CONFIG_ROUTE, CONFIG_TAB, HELM_CHART, - HELM_CHART_VERSION + HELM_CHART_VERSION, + WEBHOOK } export const enum ActionType { @@ -53,7 +54,7 @@ export const enum ConfirmationState { NA, CONFIRMED, CANCEL } export const enum ConfirmationButtons { - CONFIRM_CANCEL, YES_NO, DELETE_CANCEL, CLOSE, SWITCH_CANCEL + CONFIRM_CANCEL, YES_NO, DELETE_CANCEL, CLOSE, ENABLE_CANCEL, DISABLE_CANCEL, SWITCH_CANCEL } export const ProjectTypes = { 0: 'PROJECT.ALL_PROJECTS', 1: 'PROJECT.PRIVATE_PROJECTS', 2: 'PROJECT.PUBLIC_PROJECTS' }; @@ -80,3 +81,14 @@ export enum ResourceType { CHART_VERSION = 2, REPOSITORY_TAG = 3, } + +export enum WebhookEventTypes { + DOWNLOAD_CHART = "downloadChart", + DELETE_CHART = "deleteChart", + UPLOAD_CHART = "uploadChart", + DELETE_IMAGE = "deleteImage", + PULL_IMAGE = "pullImage", + PUSH_IMAGE = "pushImage", + SCANNING_FAILED = "scanningFailed", + SCANNING_COMPLETED = "scanningCompleted", +} diff --git a/src/portal/src/i18n/lang/en-us-lang.json b/src/portal/src/i18n/lang/en-us-lang.json index a7149b21a..f174394fd 100644 --- a/src/portal/src/i18n/lang/en-us-lang.json +++ b/src/portal/src/i18n/lang/en-us-lang.json @@ -45,7 +45,10 @@ "UPLOAD": "Upload", "NO_FILE": "No file selected", "ADD": "ADD", - "RUN": "RUN" + "RUN": "RUN", + "CONTINUE": "CONTINUE", + 
"ENABLE": "ENABLE", + "DISABLE": "DISABLE" }, "BATCH": { "DELETED_SUCCESS": "Deleted successfully", @@ -237,7 +240,8 @@ "PROJECTS": "Projects", "CONFIG": "Configuration", "HELMCHART": "Helm Charts", - "ROBOT_ACCOUNTS": "Robot Accounts" + "ROBOT_ACCOUNTS": "Robot Accounts", + "WEBHOOKS": "Webhooks" }, "PROJECT_CONFIG": { "REGISTRY": "Project registry", @@ -335,6 +339,34 @@ "PULL_IS_MUST" : "Pull permission is checked by default and can not be modified.", "EXPORT_TO_FILE" : "export to file" }, + "WEBHOOK": { + "EDIT_BUTTON": "EDIT", + "ENABLED_BUTTON": "ENABLED", + "DISABLED_BUTTON": "DISABLED", + "TYPE": "Webhook", + "STATUS": "Status", + "CREATED": "Created", + "ENABLED": "Enabled", + "DISABLED": "Disabled", + "OF": "of", + "ITEMS": "items", + "LAST_TRIGGERED": "Last Triggered", + "EDIT_WEBHOOK": "Webhook Endpoint", + "CREATE_WEBHOOK": "Getting started with webhooks", + "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications", + "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.", + "ENDPOINT_URL": "Endpoint URL", + "URL_IS_REQUIRED": "Endpoint URL is required.", + "AUTH_HEADER": "Auth Header", + "VERIFY_REMOTE_CERT": "Verify Remote Certificate", + "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT", + "CANCEL_BUTTON": "CANCEL", + "SAVE_BUTTON": "SAVE", + "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks", + "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ", + "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks", + "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project " + }, "GROUP": { "GROUP": "Group", "GROUPS": "Groups", @@ -575,6 +607,8 @@ "TAGS_COUNT": "Tags", "PULL_COUNT": "Pulls", "PULL_COMMAND": "Pull Command", + "PULL_TIME": "Pull Time", + "PUSH_TIME": "Push Time", "MY_REPOSITORY": "My Repository", "PUBLIC_REPOSITORY": "Public Repository", "DELETION_TITLE_REPO": "Confirm Repository Deletion", @@ -717,6 +751,7 @@ "LABEL": "Labels", 
"REPOSITORY": "Repository", "REPO_READ_ONLY": "Repository Read Only", + "WEBHOOK_NOTIFICATION_ENABLED": "Webhooks enabled", "SYSTEM": "System Settings", "PROJECT_QUOTAS": "Project Quotas", "VULNERABILITY": "Vulnerability", @@ -762,13 +797,14 @@ "LDAP_UID": "The attribute used in a search to match a user. It could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD.", "LDAP_SCOPE": "The scope to search for users.", "TOKEN_EXPIRATION": "The expiration time (in minutes) of a token created by the token service. Default is 30 minutes.", - "ROBOT_TOKEN_EXPIRATION": "The expiration time ( in days) of the token of the robot account, Default is 30 days. Show the number of days converted from minutes and rounds down", + "ROBOT_TOKEN_EXPIRATION": "The expiration time (in days) of the token of the robot account, Default is 30 days. Show the number of days converted from minutes and rounds down", "PRO_CREATION_RESTRICTION": "The flag to define what users have permission to create projects. By default, everyone can create a project. Set to 'Admin Only' so that only an administrator can create a project.", "ROOT_CERT_DOWNLOAD": "Download the root certificate of registry.", "SCANNING_POLICY": "Set image scanning policy based on different requirements. 'None': No active policy; 'Daily At': Triggering scanning at the specified time everyday.", "VERIFY_CERT": "Verify Cert from LDAP Server", "READONLY_TOOLTIP": "In read-only mode, you can not delete repositories or tags or push images. ", "REPO_TOOLTIP": "Users can not do any operations to the images in this mode.", + "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed", "HOURLY_CRON":"Run once an hour, beginning of hour. Equivalent to 0 0 * * * *.", "WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Equivalent to 0 0 0 * * 0.", "DAILY_CRON":"Run once a day, midnight. 
Equivalent to 0 0 0 * * *." @@ -789,7 +825,7 @@ "LDAP_GROUP_GID": "LDAP Group GID", "LDAP_GROUP_GID_INFO": "The attribute used in a search to match a user, it could be uid, cn or other attributes depending on your LDAP/AD. the group in Harbor is named with this attribute by default.", "LDAP_GROUP_ADMIN_DN": "LDAP Group Admin DN", - "LDAP_GROUP_ADMIN_DN_INFO": "Specify an LDAP group DN. all LDAP user in this group will have harbor admin privilege. Keep it blank if you do not want to.", + "LDAP_GROUP_ADMIN_DN_INFO": "Specify an LDAP group DN. All LDAP user in this group will have harbor admin privilege. Keep it blank if you do not want to.", "LDAP_GROUP_MEMBERSHIP": "LDAP Group Membership", "LDAP_GROUP_MEMBERSHIP_INFO": "The attribute indicates the membership of LDAP group, default value is memberof, in some LDAP server it could be \"ismemberof\"", "GROUP_SCOPE": "LDAP Group Scope", diff --git a/src/portal/src/i18n/lang/es-es-lang.json b/src/portal/src/i18n/lang/es-es-lang.json index 347f5bbfe..2bc164682 100644 --- a/src/portal/src/i18n/lang/es-es-lang.json +++ b/src/portal/src/i18n/lang/es-es-lang.json @@ -45,7 +45,10 @@ "UPLOAD": "Upload", "NO_FILE": "No file selected", "ADD": "ADD", - "RUN": "RUN" + "RUN": "RUN", + "CONTINUE": "CONTINUE", + "ENABLE": "ENABLE", + "DISABLE": "DISABLE" }, "BATCH": { "DELETED_SUCCESS": "Deleted successfully", @@ -238,7 +241,8 @@ "PROJECTS": "Proyectos", "CONFIG": "Configuración", "HELMCHART": "Helm Charts", - "ROBOT_ACCOUNTS": "Robot Accounts" + "ROBOT_ACCOUNTS": "Robot Accounts", + "WEBHOOKS": "Webhooks" }, "PROJECT_CONFIG": { "REGISTRY": "Registro de proyectos", @@ -336,6 +340,34 @@ "PULL_IS_MUST" : "Pull permission is checked by default and can not be modified.", "EXPORT_TO_FILE" : "export to file" }, + "WEBHOOK": { + "EDIT_BUTTON": "EDIT", + "ENABLED_BUTTON": "ENABLED", + "DISABLED_BUTTON": "DISABLED", + "TYPE": "Webhook", + "STATUS": "Status", + "CREATED": "Created", + "ENABLED": "Enabled", + "DISABLED": "Disabled", + "OF": "of", 
+ "ITEMS": "items", + "LAST_TRIGGERED": "Last Triggered", + "EDIT_WEBHOOK": "Webhook Endpoint", + "CREATE_WEBHOOK": "Getting started with webhooks", + "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications", + "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.", + "ENDPOINT_URL": "Endpoint URL", + "URL_IS_REQUIRED": "Endpoint URL is required.", + "AUTH_HEADER": "Auth Header", + "VERIFY_REMOTE_CERT": "Verify Remote Certificate", + "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT", + "CANCEL_BUTTON": "CANCEL", + "SAVE_BUTTON": "SAVE", + "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks", + "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ", + "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks", + "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project " + }, "GROUP": { "GROUP": "Group", "GROUPS": "Groups", @@ -576,6 +608,8 @@ "TAGS_COUNT": "Etiquetas", "PULL_COUNT": "Pulls", "PULL_COMMAND": "Comando Pull", + "PULL_TIME": "Pull Time", + "PUSH_TIME": "Push Time", "MY_REPOSITORY": "Mi Repositorio", "PUBLIC_REPOSITORY": "Repositorio Público", "DELETION_TITLE_REPO": "Confirmar Eliminación de Repositorio", @@ -769,6 +803,7 @@ "VERIFY_CERT": "Verify Cert from LDAP Server", "READONLY_TOOLTIP": "In read-only mode, you can not delete repositories or tags or push images. ", "GC_POLICY": "", + "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed", "HOURLY_CRON":"Run once an hour, beginning of hour. Equivalente a 0 0 * * * *.", "WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Equivalente a 0 0 0 * * 0.", "DAILY_CRON":"Run once a day, midnight. Equivalente a 0 0 0 * * *." 
diff --git a/src/portal/src/i18n/lang/fr-fr-lang.json b/src/portal/src/i18n/lang/fr-fr-lang.json index 47d94defa..37195e3bf 100644 --- a/src/portal/src/i18n/lang/fr-fr-lang.json +++ b/src/portal/src/i18n/lang/fr-fr-lang.json @@ -42,7 +42,10 @@ "UPLOAD": "Upload", "NO_FILE": "No file selected", "ADD": "ADD", - "RUN": "RUN" + "RUN": "RUN", + "CONTINUE": "CONTINUE", + "ENABLE": "ENABLE", + "DISABLE": "DISABLE" }, "BATCH": { "DELETED_SUCCESS": "Deleted successfully", @@ -231,7 +234,8 @@ "PROJECTS": "Projets", "CONFIG": "Configuration", "HELMCHART": "Helm Charts", - "ROBOT_ACCOUNTS": "Robot Accounts" + "ROBOT_ACCOUNTS": "Robot Accounts", + "WEBHOOKS": "Webhooks" }, "PROJECT_CONFIG": { "REGISTRY": "Dépôt du Projet", @@ -328,6 +332,34 @@ "PULL_IS_MUST" : "Pull permission is checked by default and can not be modified.", "EXPORT_TO_FILE" : "export to file" }, + "WEBHOOK": { + "EDIT_BUTTON": "EDIT", + "ENABLED_BUTTON": "ENABLED", + "DISABLED_BUTTON": "DISABLED", + "TYPE": "Webhook", + "STATUS": "Status", + "CREATED": "Created", + "ENABLED": "Enabled", + "DISABLED": "Disabled", + "OF": "of", + "ITEMS": "items", + "LAST_TRIGGERED": "Last Triggered", + "EDIT_WEBHOOK": "Webhook Endpoint", + "CREATE_WEBHOOK": "Getting started with webhooks", + "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications", + "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.", + "ENDPOINT_URL": "Endpoint URL", + "URL_IS_REQUIRED": "Endpoint URL is required.", + "AUTH_HEADER": "Auth Header", + "VERIFY_REMOTE_CERT": "Verify Remote Certificate", + "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT", + "CANCEL_BUTTON": "CANCEL", + "SAVE_BUTTON": "SAVE", + "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks", + "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ", + "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks", + "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project " + }, "GROUP": { 
"Group": "Group", "GROUPS": "Groups", @@ -565,6 +597,8 @@ "TAGS_COUNT": "Tags", "PULL_COUNT": "Pulls", "PULL_COMMAND": "Commande de Pull", + "PULL_TIME": "Pull Time", + "PUSH_TIME": "Push Time", "MY_REPOSITORY": "Mon Dépôt", "PUBLIC_REPOSITORY": "Dépôt Public", "DELETION_TITLE_REPO": "Confirmer la Suppresion du Dépôt", @@ -750,6 +784,7 @@ "SCANNING_POLICY": "Définissez la politique d'analyse des images en fonction des différentes exigences. 'Aucune' : pas de politique active; 'Tousles jours à' : déclenchement du balayage à l'heure spécifiée tous les jours.", "READONLY_TOOLTIP": "In read-only mode, you can not delete repositories or tags or push images. ", "GC_POLICY": "", + "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed", "HOURLY_CRON":"Run once an hour, beginning of hour. Équivalent à 0 0 * * * *.", "WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Équivalent à 0 0 0 * * 0.", "DAILY_CRON":"Run once a day, midnight. Équivalent à 0 0 0 * * *." 
diff --git a/src/portal/src/i18n/lang/pt-br-lang.json b/src/portal/src/i18n/lang/pt-br-lang.json index 8f7118975..47ce64732 100644 --- a/src/portal/src/i18n/lang/pt-br-lang.json +++ b/src/portal/src/i18n/lang/pt-br-lang.json @@ -45,7 +45,10 @@ "UPLOAD": "Upload", "NO_FILE": "Nenhum arquivo selecionado", "ADD": "ADD", - "RUN": "RUN" + "RUN": "RUN", + "CONTINUE": "CONTINUE", + "ENABLE": "ENABLE", + "DISABLE": "DISABLE" }, "BATCH": { "DELETED_SUCCESS": "Removido com sucesso", @@ -235,7 +238,8 @@ "PROJECTS": "Projetos", "CONFIG": "Configuração", "HELMCHART": "Helm Charts", - "ROBOT_ACCOUNTS": "Robot Accounts" + "ROBOT_ACCOUNTS": "Robot Accounts", + "WEBHOOKS": "Webhooks" }, "PROJECT_CONFIG": { "REGISTRY": "Registro do Projeto", @@ -362,6 +366,34 @@ "DEVELOPER": "Developer", "GUEST": "Guest" }, + "WEBHOOK": { + "EDIT_BUTTON": "EDIT", + "ENABLED_BUTTON": "ENABLED", + "DISABLED_BUTTON": "DISABLED", + "TYPE": "Webhook", + "STATUS": "Status", + "CREATED": "Created", + "ENABLED": "Enabled", + "DISABLED": "Disabled", + "OF": "of", + "ITEMS": "items", + "LAST_TRIGGERED": "Last Triggered", + "EDIT_WEBHOOK": "Webhook Endpoint", + "CREATE_WEBHOOK": "Getting started with webhooks", + "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications", + "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.", + "ENDPOINT_URL": "Endpoint URL", + "URL_IS_REQUIRED": "Endpoint URL is required.", + "AUTH_HEADER": "Auth Header", + "VERIFY_REMOTE_CERT": "Verify Remote Certificate", + "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT", + "CANCEL_BUTTON": "CANCEL", + "SAVE_BUTTON": "SAVE", + "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks", + "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ", + "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks", + "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project " + }, "AUDIT_LOG": { "USERNAME": "Nome do usuário", "REPOSITORY_NAME": "Nome do 
repositório", @@ -575,6 +607,8 @@ "TAGS_COUNT": "Tags", "PULL_COUNT": "Pulls", "PULL_COMMAND": "Comando de Pull", + "PULL_TIME": "Pull Time", + "PUSH_TIME": "Push Time", "MY_REPOSITORY": "Meu Repositório", "PUBLIC_REPOSITORY": "Repositório Público", "DELETION_TITLE_REPO": "Confirmar remoção de repositório", @@ -764,6 +798,7 @@ "VERIFY_CERT": "Verificar o Certificado do Servidor LDAP", "READONLY_TOOLTIP": "Em modo somente leitura, você não pode remover repositórios ou tags ou enviar imagens. ", "REPO_TOOLTIP": "Usuários não podem efetuar qualquer operação nas imagens nesse modo.", + "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed", "HOURLY_CRON":"Run once an hour, beginning of hour. Equivalente a 0 0 * * * *.", "WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Equivalente a 0 0 0 * * 0.", "DAILY_CRON":"Run once a day, midnight. Equivalente a 0 0 0 * * *." 
diff --git a/src/portal/src/i18n/lang/zh-cn-lang.json b/src/portal/src/i18n/lang/zh-cn-lang.json index a9e249150..230d1870f 100644 --- a/src/portal/src/i18n/lang/zh-cn-lang.json +++ b/src/portal/src/i18n/lang/zh-cn-lang.json @@ -45,7 +45,10 @@ "UPLOAD": "上传", "NO_FILE": "未选择文件", "ADD": "添加", - "RUN": "执行" + "RUN": "执行", + "CONTINUE": "继续", + "ENABLE": "启用", + "DISABLE": "关闭" }, "BATCH": { "DELETED_SUCCESS": "删除成功", @@ -236,7 +239,8 @@ "PROJECTS": "项目", "CONFIG": "配置管理", "HELMCHART": "Helm Charts", - "ROBOT_ACCOUNTS": "机器人账户" + "ROBOT_ACCOUNTS": "机器人账户", + "WEBHOOKS": "Webhooks" }, "PROJECT_CONFIG": { "REGISTRY": "项目仓库", @@ -334,6 +338,34 @@ "PULL_IS_MUST" : "拉取权限默认选中且不可修改。", "EXPORT_TO_FILE" : "导出到文件中" }, + "WEBHOOK": { + "EDIT_BUTTON": "编辑", + "ENABLED_BUTTON": "启用", + "DISABLED_BUTTON": "停用", + "TYPE": "Webhook", + "STATUS": "状态", + "CREATED": "创建时间", + "ENABLED": "启用", + "DISABLED": "停用", + "OF": "共计", + "ITEMS": "条记录", + "LAST_TRIGGERED": "最近触发事件", + "EDIT_WEBHOOK": "Webhook 目标", + "CREATE_WEBHOOK": "创建 Webhooks", + "EDIT_WEBHOOK_DESC": "指定接收 Webhook 通知的目标", + "CREATE_WEBHOOK_DESC": "为了启用 webhook, 请提供 Endpoint 和凭据以访问 Webhook 服务器。", + "ENDPOINT_URL": "Endpoint 地址", + "URL_IS_REQUIRED": "Endpoint 地址必填", + "AUTH_HEADER": "Auth Header", + "VERIFY_REMOTE_CERT": "验证远程证书", + "TEST_ENDPOINT_BUTTON": "测试 ENDPOINT", + "CANCEL_BUTTON": "取消", + "SAVE_BUTTON": "保存", + "ENABLED_WEBHOOK_TITLE": "启用项目的 Webhooks", + "ENABLED_WEBHOOK_SUMMARY": "你希望开启项目的 Webhooks 吗?", + "DISABLED_WEBHOOK_TITLE": "停用项目的 Webhooks", + "DISABLED_WEBHOOK_SUMMARY": "你希望停用项目的 Webhooks 吗?" 
+ }, "GROUP": { "GROUP": "组", "GROUPS": "组", @@ -576,6 +608,8 @@ "TAGS_COUNT": "标签数", "PULL_COUNT": "下载数", "PULL_COMMAND": "Pull命令", + "PULL_TIME": "拉取时间", + "PUSH_TIME": "推送时间", "MY_REPOSITORY": "我的仓库", "PUBLIC_REPOSITORY": "公共仓库", "DELETION_TITLE_REPO": "删除镜像仓库确认", @@ -717,6 +751,7 @@ "LABEL": "标签", "REPOSITORY": "仓库", "REPO_READ_ONLY": "仓库只读", + "WEBHOOK_NOTIFICATION_ENABLED": "开启 WEBHOOK", "SYSTEM": "系统设置", "PROJECT_QUOTAS": "项目定额", "VULNERABILITY": "漏洞", @@ -769,6 +804,7 @@ "VERIFY_CERT": "检查来自LDAP服务端的证书", "READONLY_TOOLTIP": "选中,表示正在维护状态,不可删除仓库及标签,也不可以推送镜像。", "REPO_TOOLTIP": "用户在此模式下无法对图像执行任何操作。", + "WEBHOOK_TOOLTIP": "当执行推送,拉动,删除,扫描图像或图表等特定操作时,启用 webhooks 以在指定端点接收回调", "HOURLY_CRON":"每小时运行一次。相当于 0 0 * * * *", "WEEKLY_CRON":"每周一次,周六/周日午夜之间开始。相当于 0 0 * * * *", "DAILY_CRON":"每天午夜运行一次。相当于 0 0 * * * *" diff --git a/src/replication/adapter/harbor/adapter.go b/src/replication/adapter/harbor/adapter.go index c24940d61..4c8a26597 100644 --- a/src/replication/adapter/harbor/adapter.go +++ b/src/replication/adapter/harbor/adapter.go @@ -156,7 +156,7 @@ func (a *adapter) PrepareForPush(resources []*model.Resource) error { paths := strings.Split(resource.Metadata.Repository.Name, "/") projectName := paths[0] // handle the public properties - metadata := resource.Metadata.Repository.Metadata + metadata := abstractPublicMetadata(resource.Metadata.Repository.Metadata) pro, exist := projects[projectName] if exist { metadata = mergeMetadata(pro.Metadata, metadata) @@ -187,6 +187,19 @@ func (a *adapter) PrepareForPush(resources []*model.Resource) error { return nil } +func abstractPublicMetadata(metadata map[string]interface{}) map[string]interface{} { + if metadata == nil { + return nil + } + public, exist := metadata["public"] + if !exist { + return nil + } + return map[string]interface{}{ + "public": public, + } +} + // currently, mergeMetadata only handles the public metadata func mergeMetadata(metadata1, metadata2 map[string]interface{}) map[string]interface{} { public := 
parsePublic(metadata1) && parsePublic(metadata2) diff --git a/src/replication/adapter/harbor/adapter_test.go b/src/replication/adapter/harbor/adapter_test.go index 085a62533..844e536cf 100644 --- a/src/replication/adapter/harbor/adapter_test.go +++ b/src/replication/adapter/harbor/adapter_test.go @@ -210,3 +210,26 @@ func TestMergeMetadata(t *testing.T) { assert.Equal(t, strconv.FormatBool(c.public), m["public"].(string)) } } + +func TestAbstractPublicMetadata(t *testing.T) { + // nil input metadata + meta := abstractPublicMetadata(nil) + assert.Nil(t, meta) + + // contains no public metadata + metadata := map[string]interface{}{ + "other": "test", + } + meta = abstractPublicMetadata(metadata) + assert.Nil(t, meta) + + // contains public metadata + metadata = map[string]interface{}{ + "other": "test", + "public": "true", + } + meta = abstractPublicMetadata(metadata) + require.NotNil(t, meta) + require.Equal(t, 1, len(meta)) + require.Equal(t, "true", meta["public"].(string)) +} diff --git a/src/replication/adapter/harbor/chart_registry.go b/src/replication/adapter/harbor/chart_registry.go index 81856ced3..a93cc3feb 100644 --- a/src/replication/adapter/harbor/chart_registry.go +++ b/src/replication/adapter/harbor/chart_registry.go @@ -164,6 +164,13 @@ func (a *adapter) DownloadChart(name, version string) (io.ReadCloser, error) { if err != nil { return nil, err } + if resp.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("failed to download the chart %s: %d %s", req.URL.String(), resp.StatusCode, string(body)) + } return resp.Body, nil } diff --git a/src/replication/adapter/harbor/chart_registry_test.go b/src/replication/adapter/harbor/chart_registry_test.go index a2830666d..5231c0940 100644 --- a/src/replication/adapter/harbor/chart_registry_test.go +++ b/src/replication/adapter/harbor/chart_registry_test.go @@ -137,7 +137,7 @@ func TestDownloadChart(t *testing.T) { }, { Method: 
http.MethodGet, - Pattern: "/api/chartrepo/library/charts/harbor-1.0.tgz", + Pattern: "/chartrepo/library/charts/harbor-1.0.tgz", Handler: func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }, diff --git a/src/replication/adapter/helmhub/chart_registry.go b/src/replication/adapter/helmhub/chart_registry.go index daba32952..d59cf73ad 100644 --- a/src/replication/adapter/helmhub/chart_registry.go +++ b/src/replication/adapter/helmhub/chart_registry.go @@ -17,6 +17,7 @@ package helmhub import ( "fmt" "io" + "io/ioutil" "net/http" "strings" @@ -123,7 +124,7 @@ func (a *adapter) download(version *chartVersion) (io.ReadCloser, error) { url := strings.ToLower(version.Attributes.URLs[0]) if !(strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://")) { - url = fmt.Sprintf("%s/charts/%s", version.Relationships.Chart.Data.Repo.URL, url) + url = fmt.Sprintf("%s/%s", version.Relationships.Chart.Data.Repo.URL, url) } req, err := http.NewRequest(http.MethodGet, url, nil) @@ -134,6 +135,13 @@ func (a *adapter) download(version *chartVersion) (io.ReadCloser, error) { if err != nil { return nil, err } + if resp.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("failed to download the chart %s: %d %s", req.URL.String(), resp.StatusCode, string(body)) + } return resp.Body, nil } diff --git a/src/replication/adapter/native/adapter.go b/src/replication/adapter/native/adapter.go index 8f42f6a61..887448ed7 100644 --- a/src/replication/adapter/native/adapter.go +++ b/src/replication/adapter/native/adapter.go @@ -58,9 +58,8 @@ type Adapter struct { // NewAdapter returns an instance of the Adapter func NewAdapter(registry *model.Registry) (*Adapter, error) { - var authorizer modifier.Modifier + var cred modifier.Modifier if registry.Credential != nil && len(registry.Credential.AccessSecret) != 0 { - var cred modifier.Modifier if registry.Credential.Type == 
model.CredentialTypeSecret { cred = common_http_auth.NewSecretAuthorizer(registry.Credential.AccessSecret) } else { @@ -68,10 +67,11 @@ func NewAdapter(registry *model.Registry) (*Adapter, error) { registry.Credential.AccessKey, registry.Credential.AccessSecret) } - authorizer = auth.NewStandardTokenAuthorizer(&http.Client{ - Transport: util.GetHTTPTransport(registry.Insecure), - }, cred, registry.TokenServiceURL) } + authorizer := auth.NewStandardTokenAuthorizer(&http.Client{ + Transport: util.GetHTTPTransport(registry.Insecure), + }, cred, registry.TokenServiceURL) + return NewAdapterWithCustomizedAuthorizer(registry, authorizer) } diff --git a/src/replication/dao/execution.go b/src/replication/dao/execution.go index 030b57ef0..1fbd54ba2 100644 --- a/src/replication/dao/execution.go +++ b/src/replication/dao/execution.go @@ -322,25 +322,39 @@ func UpdateTask(task *models.Task, props ...string) (int64, error) { return o.Update(task, props...) } -// UpdateTaskStatus ... +// UpdateTaskStatus updates the status of task. +// The implementation uses raw sql rather than QuerySetter.Filter... as QuerySetter +// will generate sql like: +// `UPDATE "replication_task" SET "end_time" = $1, "status" = $2 +// WHERE "id" IN ( SELECT T0."id" FROM "replication_task" T0 WHERE T0."id" = $3 +// AND T0."status" IN ($4, $5, $6))]` +// which is not a "single" sql statement, this will cause issues when running in concurrency func UpdateTaskStatus(id int64, status string, statusCondition ...string) (int64, error) { - qs := dao.GetOrmer().QueryTable(&models.Task{}). - Filter("id", id) - if len(statusCondition) > 0 { - qs = qs.Filter("status", statusCondition[0]) - } - params := orm.Params{ - "status": status, - } + params := []interface{}{} + sql := `update replication_task set status = ? ` + params = append(params, status) + if taskFinished(status) { // should update endTime - params["end_time"] = time.Now() + sql += `, end_time = ? 
` + params = append(params, time.Now()) } - n, err := qs.Update(params) + + sql += `where id = ? ` + params = append(params, id) + if len(statusCondition) > 0 { + sql += fmt.Sprintf(`and status in (%s) `, dao.ParamPlaceholderForIn(len(statusCondition))) + params = append(params, statusCondition) + } + + result, err := dao.GetOrmer().Raw(sql, params...).Exec() if err != nil { return 0, err } - log.Debugf("update task status %d: -> %s", id, status) + n, _ := result.RowsAffected() + if n > 0 { + log.Debugf("update task status %d: -> %s", id, status) + } return n, err } diff --git a/src/replication/operation/controller.go b/src/replication/operation/controller.go index 878325e7e..35d6e84fe 100644 --- a/src/replication/operation/controller.go +++ b/src/replication/operation/controller.go @@ -16,10 +16,12 @@ package operation import ( "fmt" + "regexp" "time" "github.com/goharbor/harbor/src/common/job" "github.com/goharbor/harbor/src/common/utils/log" + hjob "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/replication/dao/models" "github.com/goharbor/harbor/src/replication/model" "github.com/goharbor/harbor/src/replication/operation/execution" @@ -45,6 +47,11 @@ const ( maxReplicators = 1024 ) +var ( + statusBehindErrorPattern = "mismatch job status for stopping job: .*, job status (.*) is behind Running" + statusBehindErrorReg = regexp.MustCompile(statusBehindErrorPattern) +) + // NewController returns a controller implementation func NewController(js job.Client) Controller { ctl := &controller{ @@ -149,19 +156,36 @@ func (c *controller) StopReplication(executionID int64) error { } // got tasks, stopping the tasks one by one for _, task := range tasks { - if !isTaskRunning(task) { - log.Debugf("the task %d(job ID: %s) isn't running, its status is %s, skip", task.ID, task.JobID, task.Status) + if isTaskInFinalStatus(task) { + log.Debugf("the task %d(job ID: %s) is in final status, its status is %s, skip", task.ID, task.JobID, task.Status) 
continue } if err = c.scheduler.Stop(task.JobID); err != nil { - return err + status, flag := isStatusBehindError(err) + if flag { + switch hjob.Status(status) { + case hjob.ErrorStatus: + status = models.TaskStatusFailed + case hjob.SuccessStatus: + status = models.TaskStatusSucceed + } + e := c.executionMgr.UpdateTaskStatus(task.ID, status) + if e != nil { + log.Errorf("failed to update the status the task %d(job ID: %s): %v", task.ID, task.JobID, e) + } else { + log.Debugf("got status behind error for task %d, update it's status to %s directly", task.ID, status) + } + continue + } + log.Errorf("failed to stop the task %d(job ID: %s): %v", task.ID, task.JobID, err) + continue } log.Debugf("the stop request for task %d(job ID: %s) sent", task.ID, task.JobID) } return nil } -func isTaskRunning(task *models.Task) bool { +func isTaskInFinalStatus(task *models.Task) bool { if task == nil { return false } @@ -169,9 +193,20 @@ func isTaskRunning(task *models.Task) bool { case models.TaskStatusSucceed, models.TaskStatusStopped, models.TaskStatusFailed: - return false + return true } - return true + return false +} + +func isStatusBehindError(err error) (string, bool) { + if err == nil { + return "", false + } + strs := statusBehindErrorReg.FindStringSubmatch(err.Error()) + if len(strs) != 2 { + return "", false + } + return strs[1], true } func (c *controller) ListExecutions(query ...*models.ExecutionQuery) (int64, []*models.Execution, error) { diff --git a/src/replication/operation/controller_test.go b/src/replication/operation/controller_test.go index c19daa8c9..b6f4f5d77 100644 --- a/src/replication/operation/controller_test.go +++ b/src/replication/operation/controller_test.go @@ -15,6 +15,7 @@ package operation import ( + "errors" "io" "os" "testing" @@ -344,40 +345,57 @@ func TestGetTaskLog(t *testing.T) { func TestIsTaskRunning(t *testing.T) { cases := []struct { - task *models.Task - isRunning bool + task *models.Task + isFinalStatus bool }{ { - task: nil, - 
isRunning: false, + task: nil, + isFinalStatus: false, }, { task: &models.Task{ Status: models.TaskStatusSucceed, }, - isRunning: false, + isFinalStatus: true, }, { task: &models.Task{ Status: models.TaskStatusFailed, }, - isRunning: false, + isFinalStatus: true, }, { task: &models.Task{ Status: models.TaskStatusStopped, }, - isRunning: false, + isFinalStatus: true, }, { task: &models.Task{ Status: models.TaskStatusInProgress, }, - isRunning: true, + isFinalStatus: false, }, } for _, c := range cases { - assert.Equal(t, c.isRunning, isTaskRunning(c.task)) + assert.Equal(t, c.isFinalStatus, isTaskInFinalStatus(c.task)) } } + +func TestIsStatusBehindError(t *testing.T) { + // nil error + status, flag := isStatusBehindError(nil) + assert.False(t, flag) + + // not status behind error + err := errors.New("not status behind error") + status, flag = isStatusBehindError(err) + assert.False(t, flag) + + // status behind error + err = errors.New("mismatch job status for stopping job: 9feedf9933jffs, job status Error is behind Running") + status, flag = isStatusBehindError(err) + assert.True(t, flag) + assert.Equal(t, "Error", status) +} diff --git a/src/replication/operation/execution/execution.go b/src/replication/operation/execution/execution.go index ba9189826..0d4db946c 100644 --- a/src/replication/operation/execution/execution.go +++ b/src/replication/operation/execution/execution.go @@ -152,13 +152,9 @@ func (dm *DefaultManager) UpdateTask(task *models.Task, props ...string) error { // UpdateTaskStatus ... func (dm *DefaultManager) UpdateTaskStatus(taskID int64, status string, statusCondition ...string) error { - n, err := dao.UpdateTaskStatus(taskID, status, statusCondition...) 
- if err != nil { + if _, err := dao.UpdateTaskStatus(taskID, status, statusCondition...); err != nil { return err } - if n == 0 { - return fmt.Errorf("Update task status failed %d: -> %s ", taskID, status) - } return nil } diff --git a/src/replication/operation/hook/task.go b/src/replication/operation/hook/task.go index 576d0deab..220763dbd 100644 --- a/src/replication/operation/hook/task.go +++ b/src/replication/operation/hook/task.go @@ -25,17 +25,23 @@ func UpdateTask(ctl operation.Controller, id int64, status string) error { jobStatus := job.Status(status) // convert the job status to task status s := "" + preStatus := []string{} switch jobStatus { case job.PendingStatus: s = models.TaskStatusPending + preStatus = append(preStatus, models.TaskStatusInitialized) case job.ScheduledStatus, job.RunningStatus: s = models.TaskStatusInProgress + preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending) case job.StoppedStatus: s = models.TaskStatusStopped + preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress) case job.ErrorStatus: s = models.TaskStatusFailed + preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress) case job.SuccessStatus: s = models.TaskStatusSucceed + preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress) } - return ctl.UpdateTaskStatus(id, s) + return ctl.UpdateTaskStatus(id, s, preStatus...) 
} diff --git a/tests/apitests/python/library/registry.py b/tests/apitests/python/library/registry.py index a0466ce58..fe25fa7a6 100644 --- a/tests/apitests/python/library/registry.py +++ b/tests/apitests/python/library/registry.py @@ -5,15 +5,19 @@ import base import swagger_client class Registry(base.Base): - def create_registry(self, endpoint, name=None, username="", - password="", insecure=True, **kwargs): - if name is None: - name = base._random_name("registry") + def create_registry(self, url, registry_type= "harbor", description="", credentialType = "basic", + access_key = "admin", access_secret = "Harbor12345", name=base._random_name("registry"), + insecure=True, expect_status_code = 201, **kwargs): + client = self._get_client(**kwargs) - registry = swagger_client.RepTargetPost(name=name, endpoint=endpoint, - username=username, password=password, insecure=insecure) - _, _, header = client.targets_post_with_http_info(registry) - return base._get_id_from_header(header), name + registryCredential = swagger_client.RegistryCredential(type=credentialType, access_key=access_key, access_secret=access_secret) + registry = swagger_client.Registry(name=name, url=url, + description= description, type=registry_type, + insecure=insecure, credential=registryCredential) + + _, status_code, header = client.registries_post_with_http_info(registry) + base._assert_status_code(expect_status_code, status_code) + return base._get_id_from_header(header), _ def get_registry_id_by_endpoint(self, endpoint, **kwargs): client = self._get_client(**kwargs) @@ -21,4 +25,9 @@ class Registry(base.Base): for registry in registries or []: if registry.endpoint == endpoint: return registry.id - raise Exception("registry %s not found" % endpoint) \ No newline at end of file + raise Exception("registry %s not found" % endpoint) + + def delete_registry(self, registry_id, expect_status_code = 200, **kwargs): + client = self._get_client(**kwargs) + _, status_code, _ = 
client.registries_id_delete_with_http_info(registry_id) + base._assert_status_code(expect_status_code, status_code) \ No newline at end of file diff --git a/tests/apitests/python/library/replication.py b/tests/apitests/python/library/replication.py index 9406339b1..7e1cdd646 100644 --- a/tests/apitests/python/library/replication.py +++ b/tests/apitests/python/library/replication.py @@ -6,15 +6,31 @@ import base import swagger_client class Replication(base.Base): + def create_replication_policy(self, dest_registry=None, src_registry=None, name=None, description="", + dest_namespace = "", filters=None, trigger=swagger_client.ReplicationTrigger(type="manual",trigger_settings=swagger_client.TriggerSettings(cron="")), + deletion=False, override=True, enabled=True, expect_status_code = 201, **kwargs): + if name is None: + name = base._random_name("rule") + if filters is None: + filters = [] + for policy_filter in filters: + policy_filter["value"] = int(policy_filter["value"]) + client = self._get_client(**kwargs) + policy = swagger_client.ReplicationPolicy(name=name, description=description,dest_namespace=dest_namespace, + dest_registry=dest_registry, src_registry=src_registry,filters=filters, + trigger=trigger, deletion=deletion, override=override, enabled=enabled) + _, status_code, header = client.replication_policies_post_with_http_info(policy) + base._assert_status_code(expect_status_code, status_code) + return base._get_id_from_header(header), name def get_replication_rule(self, param = None, rule_id = None, expect_status_code = 200, **kwargs): client = self._get_client(**kwargs) if rule_id is None: if param is None: param = dict() - data, status_code, _ = client.policies_replication_get_with_http_info(param) + data, status_code, _ = client.replication_policies_id_get_with_http_info(param) else: - data, status_code, _ = client.policies_replication_id_get_with_http_info(rule_id) + data, status_code, _ = client.replication_policies_id_get_with_http_info(rule_id) 
base._assert_status_code(expect_status_code, status_code) return data @@ -24,11 +40,11 @@ class Replication(base.Base): raise Exception(r"Check replication rule failed, expect <{}> actual <{}>.".format(expect_rule_name, str(rule_data.name))) else: print r"Check Replication rule passed, rule name <{}>.".format(str(rule_data.name)) - get_trigger = str(rule_data.trigger.kind) - if expect_trigger is not None and get_trigger == str(expect_trigger): - print r"Check Replication rule trigger passed, trigger name <{}>.".format(get_trigger) - else: - raise Exception(r"Check replication rule trigger failed, expect <{}> actual <{}>.".format(expect_trigger, get_trigger)) + #get_trigger = str(rule_data.trigger.kind) + #if expect_trigger is not None and get_trigger == str(expect_trigger): + # print r"Check Replication rule trigger passed, trigger name <{}>.".format(get_trigger) + #else: + # raise Exception(r"Check replication rule trigger failed, expect <{}> actual <{}>.".format(expect_trigger, get_trigger)) def start_replication(self, rule_id, **kwargs): @@ -55,5 +71,5 @@ class Replication(base.Base): def delete_replication_rule(self, rule_id, expect_status_code = 200, **kwargs): client = self._get_client(**kwargs) - _, status_code, _ = client.policies_replication_id_delete_with_http_info(rule_id) + _, status_code, _ = client.replication_policies_id_delete_with_http_info(rule_id) base._assert_status_code(expect_status_code, status_code) diff --git a/tests/apitests/python/library/system.py b/tests/apitests/python/library/system.py index 5a9553e18..570d277e3 100644 --- a/tests/apitests/python/library/system.py +++ b/tests/apitests/python/library/system.py @@ -168,3 +168,17 @@ class System(base.Base): if deleted_files_count == 0: raise Exception(r"Get blobs eligible for deletion count is {}, while we expect more than 1.".format(deleted_files_count)) + def set_cve_whitelist(self, expires_at=None, expected_status_code=200, *cve_ids, **kwargs): + client = self._get_client(**kwargs) + 
cve_list = [swagger_client.CVEWhitelistItem(cve_id=c) for c in cve_ids] + whitelist = swagger_client.CVEWhitelist(expires_at=expires_at, items=cve_list) + try: + r = client.system_cve_whitelist_put_with_http_info(whitelist=whitelist, _preload_content=False) + except Exception as e: + base._assert_status_code(expected_status_code, e.status) + else: + base._assert_status_code(expected_status_code, r[1]) + + def get_cve_whitelist(self, **kwargs): + client = self._get_client(**kwargs) + return client.system_cve_whitelist_get() diff --git a/tests/apitests/python/library/target.py b/tests/apitests/python/library/target.py deleted file mode 100644 index ef350ea92..000000000 --- a/tests/apitests/python/library/target.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- - -import time -import base -import swagger_client - -class Target(base.Base): - def create_target(self, - endpoint_target = None, - username_target = "target_user", password_target = "Aa123456", name_target=base._random_name("target"), - target_type=0, insecure_target=True, expect_status_code = 201, - **kwargs): - if endpoint_target is None: - endpoint_target = r"https://{}.{}.{}.{}".format(int(round(time.time() * 1000)) % 100, - int(round(time.time() * 1000)) % 200, - int(round(time.time() * 1000)) % 100, - int(round(time.time() * 1000)) % 254) - client = self._get_client(**kwargs) - policy = swagger_client.RepTarget(name=name_target, endpoint=endpoint_target, - username=username_target, password=password_target, type=target_type, - insecure=insecure_target) - - _, status_code, header = client.targets_post_with_http_info(policy) - base._assert_status_code(expect_status_code, status_code) - return base._get_id_from_header(header), name_target - - def get_target(self, expect_status_code = 200, params = None, **kwargs): - client = self._get_client(**kwargs) - data = [] - if params is None: - params = {} - data, status_code, _ = client.targets_get_with_http_info(**params) - 
base._assert_status_code(expect_status_code, status_code) - return data - - def delete_target(self, target_id, expect_status_code = 200, **kwargs): - client = self._get_client(**kwargs) - _, status_code, _ = client.targets_id_delete_with_http_info(target_id) - base._assert_status_code(expect_status_code, status_code) \ No newline at end of file diff --git a/tests/apitests/python/library/user.py b/tests/apitests/python/library/user.py index 988eadff5..7b7a9181b 100644 --- a/tests/apitests/python/library/user.py +++ b/tests/apitests/python/library/user.py @@ -70,14 +70,14 @@ class User(base.Base): base._assert_status_code(200, status_code) return user_id - def update_uesr_profile(self, user_id, email=None, realname=None, comment=None, **kwargs): + def update_user_profile(self, user_id, email=None, realname=None, comment=None, **kwargs): client = self._get_client(**kwargs) user_rofile = swagger_client.UserProfile(email, realname, comment) _, status_code, _ = client.users_user_id_put_with_http_info(user_id, user_rofile) base._assert_status_code(200, status_code) return user_id - def update_uesr_role_as_sysadmin(self, user_id, IsAdmin, **kwargs): + def update_user_role_as_sysadmin(self, user_id, IsAdmin, **kwargs): client = self._get_client(**kwargs) has_admin_role = swagger_client.HasAdminRole(IsAdmin) print "has_admin_role:", has_admin_role diff --git a/tests/apitests/python/test_add_replication_rule.py b/tests/apitests/python/test_add_replication_rule.py index 86035ee81..82e500c5c 100644 --- a/tests/apitests/python/test_add_replication_rule.py +++ b/tests/apitests/python/test_add_replication_rule.py @@ -2,11 +2,12 @@ from __future__ import absolute_import import unittest from testutils import ADMIN_CLIENT +from testutils import harbor_server from testutils import TEARDOWN from library.project import Project from library.user import User from library.replication import Replication -from library.target import Target +from library.registry import Registry import 
swagger_client class TestProjects(unittest.TestCase): @@ -21,8 +22,8 @@ class TestProjects(unittest.TestCase): replication = Replication() self.replication= replication - target = Target() - self.target= target + registry = Registry() + self.registry= registry @classmethod def tearDown(self): @@ -31,11 +32,10 @@ class TestProjects(unittest.TestCase): @unittest.skipIf(TEARDOWN == False, "Test data won't be erased.") def test_ClearData(self): #1. Delete rule(RA); - for rule_id in TestProjects.rule_id_list: - self.replication.delete_replication_rule(rule_id, **ADMIN_CLIENT) + self.replication.delete_replication_rule(TestProjects.rule_id, **ADMIN_CLIENT) - #2. Delete target(TA); - self.target.delete_target(TestProjects.target_id, **ADMIN_CLIENT) + #2. Delete registry(TA); + self.registry.delete_registry(TestProjects.registry_id, **ADMIN_CLIENT) #3. Delete project(PA); self.project.delete_project(TestProjects.project_add_rule_id, **TestProjects.USER_add_rule_CLIENT) @@ -50,12 +50,12 @@ class TestProjects(unittest.TestCase): Test step and expected result: 1. Create a new user(UA); 2. Create a new private project(PA) by user(UA); - 3. Create a new target(TA)/registry; - 4. Create a new rule for project(PA) and target(TA); - 5. Check if rule is exist. + 3. Create a new registry; + 4. Create a new rule for this registry; + 5. Check rule should be exist. Tear down: 1. Delete rule(RA); - 2. Delete targe(TA); + 2. Delete registry(TA); 3. Delete project(PA); 4. Delete user(UA). """ @@ -74,21 +74,15 @@ class TestProjects(unittest.TestCase): self.project.projects_should_exist(dict(public=False), expected_count = 1, expected_project_id = TestProjects.project_add_rule_id, **TestProjects.USER_add_rule_CLIENT) - #3. Create a new target(TA)/registry - TestProjects.target_id, _ = self.target.create_target(**ADMIN_CLIENT) - print "TestProjects.target_id:", TestProjects.target_id + #3. 
Create a new registry + TestProjects.registry_id, _ = self.registry.create_registry("https://" + harbor_server,**ADMIN_CLIENT) + print "TestProjects.registry_id:", TestProjects.registry_id - TestProjects.rule_id_list = [] + #4. Create a new rule for this registry; + TestProjects.rule_id, rule_name = self.replication.create_replication_policy(dest_registry=swagger_client.Registry(id=int(TestProjects.registry_id)), **ADMIN_CLIENT) - trigger_values_to_set = ["Manual", "Immediate"] - for value in trigger_values_to_set: - #4. Create a new rule for project(PA) and target(TA) - rule_id, rule_name = self.replication.create_replication_rule([TestProjects.project_add_rule_id], - [TestProjects.target_id], trigger=swagger_client.RepTrigger(kind=value), **ADMIN_CLIENT) - TestProjects.rule_id_list.append(rule_id) - - #5. Check rule should be exist - self.replication.check_replication_rule_should_exist(rule_id, rule_name, expect_trigger = value, **ADMIN_CLIENT) + #5. Check rule should be exist + self.replication.check_replication_rule_should_exist(TestProjects.rule_id, rule_name, **ADMIN_CLIENT) if __name__ == '__main__': diff --git a/tests/apitests/python/test_assign_sys_admin.py b/tests/apitests/python/test_assign_sys_admin.py index d098d1495..db158a2c1 100644 --- a/tests/apitests/python/test_assign_sys_admin.py +++ b/tests/apitests/python/test_assign_sys_admin.py @@ -45,15 +45,15 @@ class TestProjects(unittest.TestCase): USER_ASSIGN_SYS_ADMIN_CLIENT=dict(endpoint = url, username = user_assign_sys_admin_name, password = user_assign_sys_admin_password) #2. Set user(UA) has sysadmin role by admin, check user(UA) can modify system configuration; - self.user.update_uesr_role_as_sysadmin(TestProjects.user_assign_sys_admin_id, True, **ADMIN_CLIENT) + self.user.update_user_role_as_sysadmin(TestProjects.user_assign_sys_admin_id, True, **ADMIN_CLIENT) self.conf.set_configurations_of_token_expiration(60, **USER_ASSIGN_SYS_ADMIN_CLIENT) #3. 
Set user(UA) has no sysadmin role by admin, check user(UA) can not modify system configuration; - self.user.update_uesr_role_as_sysadmin(TestProjects.user_assign_sys_admin_id, False, **ADMIN_CLIENT) + self.user.update_user_role_as_sysadmin(TestProjects.user_assign_sys_admin_id, False, **ADMIN_CLIENT) self.conf.set_configurations_of_token_expiration(70, expect_status_code = 403, **USER_ASSIGN_SYS_ADMIN_CLIENT) #4. Set user(UA) has sysadmin role by admin, check user(UA) can modify system configuration. - self.user.update_uesr_role_as_sysadmin(TestProjects.user_assign_sys_admin_id, True, **ADMIN_CLIENT) + self.user.update_user_role_as_sysadmin(TestProjects.user_assign_sys_admin_id, True, **ADMIN_CLIENT) self.conf.set_configurations_of_token_expiration(80, **USER_ASSIGN_SYS_ADMIN_CLIENT) if __name__ == '__main__': diff --git a/tests/apitests/python/test_sys_cve_whitelists.py b/tests/apitests/python/test_sys_cve_whitelists.py new file mode 100644 index 000000000..9f67440e2 --- /dev/null +++ b/tests/apitests/python/test_sys_cve_whitelists.py @@ -0,0 +1,73 @@ +from __future__ import absolute_import + +import unittest +import swagger_client +import time + +from testutils import ADMIN_CLIENT +from library.user import User +from library.system import System + + +class TestSysCVEWhitelist(unittest.TestCase): + """ + Test case: + System Level CVE Whitelist + Setup: + Create user(RA) + Test Steps: + 1. User(RA) reads the system level CVE whitelist and it's empty. + 2. User(RA) updates the system level CVE whitelist, verify it's failed. + 3. Update user(RA) to system admin + 4. User(RA) updates the system level CVE whitelist, verify it's successful. + 5. User(RA) reads the system level CVE whitelist, verify the CVE list is updated. + 6. User(RA) updates the expiration date of system level CVE whitelist. + 7. User(RA) reads the system level CVE whitelist, verify the expiration date is updated. + Tear Down: + 1. Clear the system level CVE whitelist. + 2. 
Delete User(RA) + """ + def setUp(self): + self.user = User() + self.system = System() + user_ra_password = "Aa123456" + print("Setup: Creating user for test") + user_ra_id, user_ra_name = self.user.create_user(user_password=user_ra_password, **ADMIN_CLIENT) + print("Created user: %s, id: %s" % (user_ra_name, user_ra_id)) + self.USER_RA_CLIENT = dict(endpoint=ADMIN_CLIENT["endpoint"], + username=user_ra_name, + password=user_ra_password) + self.user_ra_id = int(user_ra_id) + + def testSysCVEWhitelist(self): + # 1. User(RA) reads the system level CVE whitelist and it's empty. + wl = self.system.get_cve_whitelist(**self.USER_RA_CLIENT) + self.assertEqual(0, len(wl.items), "The initial system level CVE whitelist is not empty: %s" % wl.items) + # 2. User(RA) updates the system level CVE whitelist, verify it's failed. + cves = ['CVE-2019-12310'] + self.system.set_cve_whitelist(None, 403, *cves, **self.USER_RA_CLIENT) + # 3. Update user(RA) to system admin + self.user.update_user_role_as_sysadmin(self.user_ra_id, True, **ADMIN_CLIENT) + # 4. User(RA) updates the system level CVE whitelist, verify it's successful. + self.system.set_cve_whitelist(None, 200, *cves, **self.USER_RA_CLIENT) + # 5. User(RA) reads the system level CVE whitelist, verify the CVE list is updated. + expect_wl = [swagger_client.CVEWhitelistItem(cve_id='CVE-2019-12310')] + wl = self.system.get_cve_whitelist(**self.USER_RA_CLIENT) + self.assertIsNone(wl.expires_at) + self.assertEqual(expect_wl, wl.items) + # 6. User(RA) updates the expiration date of system level CVE whitelist. + exp = int(time.time()) + 3600 + self.system.set_cve_whitelist(exp, 200, *cves, **self.USER_RA_CLIENT) + # 7. User(RA) reads the system level CVE whitelist, verify the expiration date is updated. 
+ wl = self.system.get_cve_whitelist(**self.USER_RA_CLIENT) + self.assertEqual(exp, wl.expires_at) + + def tearDown(self): + print("TearDown: Clearing the Whitelist") + self.system.set_cve_whitelist(**ADMIN_CLIENT) + print("TearDown: Deleting user: %d" % self.user_ra_id) + self.user.delete_user(self.user_ra_id, **ADMIN_CLIENT) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tests/generateCerts.sh b/tests/generateCerts.sh index 7cc2b1570..2f95789ed 100755 --- a/tests/generateCerts.sh +++ b/tests/generateCerts.sh @@ -2,8 +2,10 @@ # These certs file is only for Harbor testing. IP='127.0.0.1' +if [ ! -z "$1" ]; then IP=$1; fi OPENSSLCNF= DATA_VOL='/data' +CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" for path in /etc/openssl/openssl.cnf /etc/ssl/openssl.cnf /usr/local/etc/openssl/openssl.cnf; do if [[ -e ${path} ]]; then @@ -16,21 +18,28 @@ if [[ -z ${OPENSSLCNF} ]]; then fi # Create CA certificate -openssl req \ - -newkey rsa:4096 -nodes -sha256 -keyout harbor_ca.key \ - -x509 -days 365 -out harbor_ca.crt -subj '/C=CN/ST=PEK/L=Bei Jing/O=VMware/CN=HarborCA' +#openssl req \ +# -newkey rsa:4096 -nodes -sha256 -keyout $CUR_DIR/harbor_ca.key \ +# -x509 -days 365 -out $CUR_DIR/harbor_ca.crt -subj '/C=CN/ST=PEK/L=Bei Jing/O=VMware/CN=HarborCA' # Generate a Certificate Signing Request +if echo $IP|grep -E '^([0-9]+\.){3}[0-9]+$' ; then openssl req \ -newkey rsa:4096 -nodes -sha256 -keyout $IP.key \ - -out $IP.csr -subj '/C=CN/ST=PEK/L=Bei Jing/O=VMware/CN=HarborManager' + -out $IP.csr -subj "/C=CN/ST=PEK/L=Bei Jing/O=VMware/CN=HarborManager" +echo subjectAltName = IP:$IP > extfile.cnf +else +openssl req \ + -newkey rsa:4096 -nodes -sha256 -keyout $IP.key \ + -out $IP.csr -subj "/C=CN/ST=PEK/L=Bei Jing/O=VMware/CN=$IP" +echo subjectAltName = DNS.1:$IP > extfile.cnf +fi # Generate the certificate of local registry host -echo subjectAltName = IP:$IP > extfile.cnf -openssl x509 -req -days 365 -in $IP.csr -CA harbor_ca.crt 
\ - -CAkey harbor_ca.key -CAcreateserial -extfile extfile.cnf -out $IP.crt +openssl x509 -req -days 365 -sha256 -in $IP.csr -CA $CUR_DIR/harbor_ca.crt \ + -CAkey $CUR_DIR/harbor_ca.key -CAcreateserial -extfile extfile.cnf -out $IP.crt # Copy to harbor default location mkdir -p $DATA_VOL/cert cp $IP.crt $DATA_VOL/cert/server.crt -cp $IP.key $DATA_VOL/cert/server.key \ No newline at end of file +cp $IP.key $DATA_VOL/cert/server.key diff --git a/tests/harbor_ca.crt b/tests/harbor_ca.crt new file mode 100644 index 000000000..b94a325c2 --- /dev/null +++ b/tests/harbor_ca.crt @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFhTCCA22gAwIBAgIUBWPUOcl5wyYV18FraR9cayN1F1UwDQYJKoZIhvcNAQEL +BQAwUjELMAkGA1UEBhMCQ04xDDAKBgNVBAgMA1BFSzERMA8GA1UEBwwIQmVpIEpp +bmcxDzANBgNVBAoMBlZNd2FyZTERMA8GA1UEAwwISGFyYm9yQ0EwHhcNMTkwODEz +MDMyMjUwWhcNMjAwODEyMDMyMjUwWjBSMQswCQYDVQQGEwJDTjEMMAoGA1UECAwD +UEVLMREwDwYDVQQHDAhCZWkgSmluZzEPMA0GA1UECgwGVk13YXJlMREwDwYDVQQD +DAhIYXJib3JDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALjlYE0c +16ZsTVBpr2s48QXxuc0IcddfyWqpBGwiWTGG3/LS/ebkiFfKVViBicK2A5IofI4X +6UBuu+hb3FZjJtpqNPFMrOK0K0eiheBQVxeCQavtoTpF7dtuWyv2bAgmvVagBxtU +sWWWzSO1vanO4Acs/ijfZjUdxN9JQk6xDj5Q+CLo0ikjFPTTD5DT40Z89qf440VU +019b70ZYUd61ZAGflfJNDQZ14GqGuG7pUTXMS76cuCbpGldhgILkBmKS/B3gm1ex +YzB6omKDbgGTOK4HiJpKsC0xWfYjY9LaTTmaJ+q8XVzv6oJu5u5RWSx2TEXy72Hv +E8rYLo1zKXQ+O03/XbPiK/bgsYEsPIxumMPKEOZJ3vdUxWOnYIssVqQgqpAByo4k ++ErBuQUwZz22NraV2nDqyiP+feuzD2nCKLAslEx2QWOvqfhvGgeyv0ViOdtyVFbf +XvOAq9FbY5w+i0MLBb0tcU+f8xzKbecsTbJDTLd0Fy7Sx2sT5ywfG1SDeNwRr8ar +QCBWUgim8Lc7U3OgrrjzMJGfKD/RgMWSjOxV1LXbjgOFhnh7/wvRxf87fURHigt0 +26ZLCKm2i2YStL4S2yNSm206SXMkHUMZV/mFMHc/JK/EuDU9xXsK2P1d1H3SNrgK +axU7fcXnwIM9gcDrIlm+8MblrJWvGTe6GDn1AgMBAAGjUzBRMB0GA1UdDgQWBBSd +0G4mm1Ui8glxkvq5fcJflnlxCDAfBgNVHSMEGDAWgBSd0G4mm1Ui8glxkvq5fcJf +lnlxCDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBDYYDcmjwy +5fmCzBcMYEh7XMiFhS3UkojgB7LB6R41o6GmXvJOgaDobQC78We3I3Y8r8vVbAY+ 
+Jh42tRRwKMIRUywkDLr5tfyiDUcGvSxpfysTYSNNknsctsowI6yCcRIsY0XqZEE9 +Y3GMSaljAcxG++gR2XSxSPwYQ/TKDiM1Fyv3YNhnmoycBQItcIz29hYVXRgBkNkx +Cap8MDERJKlHiAgopoXtxnSbgZn4pZa6bVRF/UUYRmRLKO8tyKd8ZXHfQvvso1HU +e+Wcy3EoADr3aYCytPppo33zDHBX4+lcL2rKAH2+K5JOhnxZuRR4dWoczkI5mYRi +qZ809uHnXoV4yJ14NWnoil6kUF3YxU9hWzjEaVcZfp7WUw0BeTZ9M0VqkjxSiSuz +QvSzoPqZ2ajfxawf1fdttU6YUewBkjMOTC2C8qoA8m7HNRTznoZbfFITG1gJlnFT +y8oWY+ZrEsG7lID2zMaZopSAwDzuBoqLGE66LK+RtFSrAcGHSr3Xlp0R6hX4FeyN +flTTBxE6eNoEiV56x9RuSDvWnw/l38B/y9q9wMNkI+kb2d8QNkWFz9q1W01Vdceo +ZzTA/fNcErZ0YiE/wY9VEW+DRoO3ntMN8lEsNLr04kUG7RJ6EOu6kQPHQuJ3Bujy +rnAVXLxzOqGPfKD6gBQS2pTikQCYpqtaFg== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/tests/harbor_ca.key b/tests/harbor_ca.key new file mode 100644 index 000000000..3f544037b --- /dev/null +++ b/tests/harbor_ca.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC45WBNHNembE1Q +aa9rOPEF8bnNCHHXX8lqqQRsIlkxht/y0v3m5IhXylVYgYnCtgOSKHyOF+lAbrvo +W9xWYybaajTxTKzitCtHooXgUFcXgkGr7aE6Re3bblsr9mwIJr1WoAcbVLFlls0j +tb2pzuAHLP4o32Y1HcTfSUJOsQ4+UPgi6NIpIxT00w+Q0+NGfPan+ONFVNNfW+9G +WFHetWQBn5XyTQ0GdeBqhrhu6VE1zEu+nLgm6RpXYYCC5AZikvwd4JtXsWMweqJi +g24BkziuB4iaSrAtMVn2I2PS2k05mifqvF1c7+qCbubuUVksdkxF8u9h7xPK2C6N +cyl0PjtN/12z4iv24LGBLDyMbpjDyhDmSd73VMVjp2CLLFakIKqQAcqOJPhKwbkF +MGc9tja2ldpw6soj/n3rsw9pwiiwLJRMdkFjr6n4bxoHsr9FYjnbclRW317zgKvR +W2OcPotDCwW9LXFPn/Mcym3nLE2yQ0y3dBcu0sdrE+csHxtUg3jcEa/Gq0AgVlII +pvC3O1NzoK648zCRnyg/0YDFkozsVdS1244DhYZ4e/8L0cX/O31ER4oLdNumSwip +totmErS+EtsjUpttOklzJB1DGVf5hTB3PySvxLg1PcV7Ctj9XdR90ja4CmsVO33F +58CDPYHA6yJZvvDG5ayVrxk3uhg59QIDAQABAoICAAIsH7+IMThxWU8yjq8R0jMh +re8sxDmllHY+WiDzHl0omoT92aHW2Ys+g1Yw3298N/qFo0EAIutw4aBPQ/132MME +MG8NWZKoT0HeNPh3uS47h43/kr9ehvbnCwcvNAG8gsj7xFmb2yG4bdyXjAzss1Ei +RDIyvb6uBNwivjayedpdlSzD04RMNzjRKgOnmaoAWd2LXRA5eOpL6DnJW9zkALLM +LzTTlu2WgPZ/crdK4nthVRp+OOOsJXUVXi8rgq+xzmiDdQ/Is8OkDThfFvHJywaw +a/h0HDHLvKTZsZiOnA2rNADcCbTH1NeHegsexY9yLF8+BXX/GxptA88BpWEKQiQZ 
+WMKfwR2EhUA/4SJmIGORZOA6LZxOVCnuAxLn7SoUNXO9x2Ci0X3XblfKJJVCXjbi +pT1OvGISzsm7ZlPB8+jV/4BbeDZnssnKLYnP43/4BomlRW8ZyxAeT9XdlWJvs7Bu +mnEaUOUWGOicYqPvbOHj1M+PAxPdmCW2vCT0TigXpN5v/isBwSCu5i2pl29u8lrX +wSE+wdS4NGyFAFlpiJafdOgrKtCOmzA2snEMzGu9PkCS6HeppIymii6kSqfRuG2O +ZWWeVLOY2jpdJPh/jszXzfq88pkoYkMIjbiu38uT7AB1NG4HKUCV3lhmaw6bS76w +hi1sMUzHEUn8Q4tpHPzZAoIBAQDfaOyGZn9P/wYF5JSKuVlCSPG8I3Brtkwtz987 +SiiQVFWsG2b2e2U+ZCsoHMFTmFrFo27zwaRPWvlBR0YA1cnqLIWwq5k/aID859o2 +sTcncDs2Dthq+R0Vh0q+n5Cx2if0heR8ilmOmLtkeRhaNOhpTAPBi5rokIsAAYTa +uCffHzp7Bosv97p9fd1+21ZnQCldJZOOzRA/e+UfMu/El1lhUElVWPTwsxgrE/jI +7uggzFGab3VlUkovS3x+iAiv6eWxJSurH89euyvYxl+EG5uFT8+invWuLKrZDEtj +iEBUoc/h/iVYGJyZ7TQXaVOoJD06T45NcI30mstotSfPqFufAoIBAQDT3i5AmH5V +7Wd9p6sA3jAWmDSzsA/oPOLi9MWKoBiyZ4hjRp7OB6YJlkZBWfpoHPHNg32mrcxr +sRBN2wm6yP7kiZHrTAZP8S37ZWGKP07i2QddKMcwhR5wej5T300EVqFOunVdHwOz +mVBYPdgbof4k6E1bOin3gj40hqvFri4Jw/5klvJdUOWBb5OgvJnixjpE3+uUkjmM +gjj51AO5WJjsscObKIPNbgiVME/L15OPDsO+tf3BAPNZ90xQLzax1mO+fBz7KIYR +eULZMRtDBGEfhWDR8BTbjw8b/pfZsQD0D0/IB593YdlWyFW3QOb8+nGl5W46vGix +4OZZ1Itf/4HrAoIBAQDBrIYPZV/NC7o+9Y/ISzIUAoR9owNcfSbBOEm/bmSH6nRy +xTaXSxXT5qZ7GaKHQ7a9SxdufVph6O3YJ1+KbcujFIG5TKmHjKL1nFFRxIOZzvOl +w2zeH6OU/DpR0qZvaD3m/wO6630D32fkjA4OdXtdfSZsbQgXwOafVLHFoov+I2Zh +LKURKmMjUy/nP2JCFB9HvsGStDb3sgJI77Fn7gTwFdfdA0ckOz4iaifsmR/m/vln +NmTBN3tUUM5WKrvNNKmIzj5zFRqCdyRlwmMfdYd3JF9ODRvSqKpbiwr3+DA8riI3 +OklJe9yWnEniWc7KHtBtcnZcr8yAVokr9o/St5LlAoIBAQDQ7eVGphrPudG4xEOK +E5HwdiBioljNeF112lODpOU16YtB+z5XhotiIOMfRw/8465AME8Us4dHG9EsNbie +jd9ul4tiMhJ3eysRIqTRpCSy57qvT6s+Wcfuu14Db82PXa6s6IscTZ1k2ue0XShj +95eb5cmDERSZk8KsIbH6uw2Da9fOclyHUWNCBTnb5KEMVNbZXMgAN0KxISn2k/Eo +Mgp8P8DZnVZ8mumz1XSbW/eTt8eopeebEMjqC0kiOa0CKp0qF1KtCwVK7f3SGO79 +Y7AzWWBlJxAqhCUuQh6U+kwqYX8XjwzeuYuOXPjKQiKHjqHMKzhMi8fiwhnmtAbN +oDYJAoIBAAYnTbQcHv5KExeolWropSnY4xfWfzC/nmfFTao1iPfbDiFMkO8uRku4 +eOgvlPbiS/cT1MEKSfQqTyMkWxlgghTiDMOTkm6iFQ1q4UQN7ua4eNVNeItTdpZb +c3UER6XNgI0CpdOb6Jq1529+g9/dly39qqQM4n82nFuizknMWlW5BlxsbwTy2xhi 
+JAA3JgLgB38UdL1sBDscX2vCl5pZhXXxWmVDud67exbMUnR4ib1bzG4nsXTHe72P +Jq2W5mySj9uDfcNHyBmfl95mP+VWOjQlxMg/cmS/CU3q04cMzUKX4froNRUi3eYQ +CFZg63hc+GA5YEhJM3n6ZkTZnTJH3Lc= +-----END PRIVATE KEY----- \ No newline at end of file diff --git a/tests/harbor_ca.srl b/tests/harbor_ca.srl new file mode 100644 index 000000000..34de77b2e --- /dev/null +++ b/tests/harbor_ca.srl @@ -0,0 +1 @@ +63B7F610244848F31E6F589536F579890B0812B3 \ No newline at end of file diff --git a/tests/robot-cases/Group0-BAT/API_DB.robot b/tests/robot-cases/Group0-BAT/API_DB.robot index e65452d82..da35f2222 100644 --- a/tests/robot-cases/Group0-BAT/API_DB.robot +++ b/tests/robot-cases/Group0-BAT/API_DB.robot @@ -25,8 +25,8 @@ Test Case - Delete a Repository of a Certain Project Created by Normal User Harbor API Test ./tests/apitests/python/test_del_repo.py Test Case - Add a System Global Label to a Certain Tag Harbor API Test ./tests/apitests/python/test_add_sys_label_to_tag.py -# Test Case - Add Replication Rule -# Harbor API Test ./tests/apitests/python/test_add_replication_rule.py +Test Case - Add Replication Rule + Harbor API Test ./tests/apitests/python/test_add_replication_rule.py Test Case - Edit Project Creation Harbor API Test ./tests/apitests/python/test_edit_project_creation.py Test Case - Scan Image diff --git a/tests/travis/ut_run.sh b/tests/travis/ut_run.sh index 3f21ad2a6..c1086f330 100755 --- a/tests/travis/ut_run.sh +++ b/tests/travis/ut_run.sh @@ -16,4 +16,4 @@ docker ps go test -race -i ./src/core ./src/jobservice sudo -E env "PATH=$PATH" "POSTGRES_MIGRATION_SCRIPTS_PATH=/home/travis/gopath/src/github.com/goharbor/harbor/make/migrations/postgresql/" ./tests/coverage4gotest.sh -goveralls -coverprofile=profile.cov -service=travis-ci \ No newline at end of file +goveralls -coverprofile=profile.cov -service=travis-ci || true \ No newline at end of file