diff --git a/.travis.yml b/.travis.yml index be4508a97..7ba2076f8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,10 @@ matrix: - go: 1.12.5 env: - OFFLINE=true + - language: node_js + node_js: 10.16.2 + env: + - UI_UT=true env: global: - POSTGRESQL_HOST: localhost @@ -64,3 +68,4 @@ script: - if [ "$APITEST_DB" == true ]; then bash ./tests/travis/api_run.sh DB $IP; fi - if [ "$APITEST_LDAP" == true ]; then bash ./tests/travis/api_run.sh LDAP $IP; fi - if [ "$OFFLINE" == true ]; then bash ./tests/travis/distro_installer.sh; fi +- if [ "$UI_UT" == true ]; then bash ./tests/travis/ui_ut_run.sh ; fi diff --git a/ADOPTERS.md b/ADOPTERS.md index 2df4e3edc..f86250459 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -10,7 +10,6 @@ be added to this list as they transition to production deployments. JD.com      trendmicro        -OnStar        DataYes        axatp       

360 Total Security      diff --git a/Makefile b/Makefile index 79e5584aa..c18b29cf7 100644 --- a/Makefile +++ b/Makefile @@ -81,6 +81,7 @@ CLAIRFLAG=false HTTPPROXY= BUILDBIN=false MIGRATORFLAG=false +NPM_REGISTRY=https://registry.npmjs.org # enable/disable chart repo supporting CHARTFLAG=false @@ -97,7 +98,7 @@ VERSIONFILENAME=UIVERSION PREPARE_VERSION_NAME=versions #versions -REGISTRYVERSION=v2.7.1-patch-2819 +REGISTRYVERSION=v2.7.1-patch-2819-2553 NGINXVERSION=$(VERSIONTAG) NOTARYVERSION=v0.6.1 CLAIRVERSION=v2.0.9 @@ -234,12 +235,14 @@ PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \ $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \ $(HARBORPKG)/prepare \ $(HARBORPKG)/LICENSE $(HARBORPKG)/install.sh \ + $(HARBORPKG)/common.sh \ $(HARBORPKG)/harbor.yml PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \ $(HARBORPKG)/prepare \ $(HARBORPKG)/LICENSE \ $(HARBORPKG)/install.sh \ + $(HARBORPKG)/common.sh \ $(HARBORPKG)/harbor.yml DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME) @@ -304,7 +307,8 @@ build: -e REGISTRYVERSION=$(REGISTRYVERSION) -e NGINXVERSION=$(NGINXVERSION) -e NOTARYVERSION=$(NOTARYVERSION) -e NOTARYMIGRATEVERSION=$(NOTARYMIGRATEVERSION) \ -e CLAIRVERSION=$(CLAIRVERSION) -e CLAIRDBVERSION=$(CLAIRDBVERSION) -e VERSIONTAG=$(VERSIONTAG) \ -e BUILDBIN=$(BUILDBIN) -e REDISVERSION=$(REDISVERSION) -e MIGRATORVERSION=$(MIGRATORVERSION) \ - -e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER) + -e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER) \ + -e NPM_REGISTRY=$(NPM_REGISTRY) install: compile ui_version build prepare start @@ -431,7 +435,7 @@ swagger_client: mkdir harborclient java -jar swagger-codegen-cli.jar generate -i docs/swagger.yaml -l python -o harborclient cd harborclient; python ./setup.py install - pip install docker -q + pip install docker -q pip freeze 
cleanbinary: diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..bdf41fd4f --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,91 @@ +# Security Release Process +Harbor is a large growing community devoted in creating a private enterprise-grade registry for all your cloud native assets. The community has adopted this security disclosure and response policy to ensure we responsibly handle critical issues. + +## Supported Versions +This section describes the maximum version skew supported between various Harbor releases. Harbor versions are expressed as **x.y.z**, where **x** is the major version, **y** is the minor version, and **z** is the patch version, following [Semantic Versioning terminology](https://semver.org/). + +### Support Policy +The Harbor project maintains release branches for the three most recent minor releases. Applicable fixes, including security fixes, may be backported to those three release branches, depending on severity and feasibility. Patch releases are cut from those branches at a regular cadence, or as needed. The Harbor project typically has a minor release approximately every 3 months, maintaining each minor release branch for approximately 9 months. + +There is no mandated timeline for major versions and there are currently no criteria for shipping a new major version (i.e. Harbor 2.0.0). + +### Minor Release Support Matrix +| Version | Supported | +| ------- | ------------------ | +| Harbor v1.7.x | :white_check_mark: | +| Harbor v1.8.x | :white_check_mark: | +| Harbor v1.9.x | :white_check_mark: | + +## Reporting a Vulnerability - Private Disclosure Process +Security is of the highest importance and all security vulnerabilities or suspected security vulnerabilities should be reported to Harbor privately, to minimize attacks against current users of Harbor before they are fixed. Vulnerabilities will be investigated and patched on the next patch (or minor) release as soon as possible. 
This information could be kept entirely internal to the project. + +If you know of a publicly disclosed security vulnerability for Harbor, please **IMMEDIATELY** contact cncf-harbor-security@lists.cncf.io to inform the Harbor Security Team. + +**IMPORTANT: Do not file public issues on GitHub for security vulnerabilities** + +To report a vulnerability or a security-related issue, please email the private address cncf-harbor-security@lists.cncf.io with the details of the vulnerability. The email will be fielded by the Harbor Security Team, which is made up of Harbor maintainers who have committer and release permissions. Emails will be addressed within 3 business days, including a detailed plan to investigate the issue and any potential workarounds to perform in the meantime. Do not report non-security-impacting bugs through this channel. Use [GitHub issues](https://github.com/goharbor/harbor/issues/new/choose) instead. + +### Proposed Email Content +Provide a descriptive subject line and in the body of the email include the following information: +* Basic identity information, such as your name and your affiliation or company. +* Detailed steps to reproduce the vulnerability (POC scripts, screenshots, and compressed packet captures are all helpful to us). +* Description of the effects of the vulnerability on Harbor and the related hardware and software configurations, so that the Harbor Security Team can reproduce it. +* How the vulnerability affects Harbor usage and an estimation of the attack surface, if there is one. +* List other projects or dependencies that were used in conjunction with Harbor to produce the vulnerability. + +## When to report a vulnerability +* When you think Harbor has a potential security vulnerability. +* When you suspect a potential vulnerability but you are unsure that it impacts Harbor. +* When you know of or suspect a potential vulnerability on another project that is used by Harbor. 
For example Harbor has a dependency on Docker, PGSql, Redis, Notary, Clair, etc. + +## Patch, Release, and Disclosure +The Harbor Security Team will respond to vulnerability reports as follows: + +1. The Security Team will investigate the vulnerability and determine its effects and criticality. +2. If the issue is not deemed to be a vulnerability, the Security Team will follow up with a detailed reason for rejection. +3. The Security Team will initiate a conversation with the reporter within 3 business days. +4. If a vulnerability is acknowledged and the timeline for a fix is determined, the Security Team will work on a plan to communicate with the appropriate community, including identifying mitigating steps that affected users can take to protect themselves until the fix is rolled out. +5. The Security Team will also create a [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS Calculator](https://www.first.org/cvss/calculator/3.0). The Security Team makes the final call on the calculated CVSS; it is better to move quickly than making the CVSS perfect. Issues may also be reported to [Mitre](https://cve.mitre.org/) using this [scoring calculator](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator). The CVE will initially be set to private. +6. The Security Team will work on fixing the vulnerability and perform internal testing before preparing to roll out the fix. +7. The Security Team will provide early disclosure of the vulnerability by emailing the cncf-harbor-distributors-announce@lists.cncf.io mailing list. Distributors can initially plan for the vulnerability patch ahead of the fix, and later can test the fix and provide feedback to the Harbor team. See the section **Early Disclosure to Harbor Distributors List** for details about how to join this mailing list. +8. A public disclosure date is negotiated by the Harbor Security Team, the bug submitter, and the distributors list. 
We prefer to fully disclose the bug as soon as possible once a user mitigation or patch is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for distributor coordination. The timeframe for disclosure is from immediate (especially if it’s already publicly known) to a few weeks. For a critical vulnerability with a straightforward mitigation, we expect report date to public disclosure date to be on the order of 14 business days. The Harbor Security Team holds the final say when setting a public disclosure date. +9. Once the fix is confirmed, the Security Team will patch the vulnerability in the next patch or minor release, and backport a patch release into all earlier supported releases. Upon release of the patched version of Harbor, we will follow the **Public Disclosure Process**. + +### Public Disclosure Process +The Security Team publishes a public [advisory](https://github.com/goharbor/harbor/security/advisories) to the Harbor community via GitHub. In most cases, additional communication via Slack, Twitter, CNCF lists, blog and other channels will assist in educating Harbor users and rolling out the patched release to affected users. + +The Security Team will also publish any mitigating steps users can take until the fix can be applied to their Harbor instances. Harbor distributors will handle creating and publishing their own security advisories. + +## Mailing lists +- Use cncf-harbor-security@lists.cncf.io to report security concerns to the Harbor Security Team, who uses the list to privately discuss security issues and fixes prior to disclosure. +- Join cncf-harbor-distributors-announce@lists.cncf.io for early private information and vulnerability disclosure. Early disclosure may include mitigating steps and additional information on security patch releases. See below for information on how Harbor distributors or vendors can apply to join this list. 
+ +## Early Disclosure to Harbor Distributors List +This private list is intended to be used primarily to provide actionable information to multiple distributor projects at once. This list is not intended to inform individuals about security issues. + +### Membership Criteria +To be eligible to join the cncf-harbor-distributors-announce@lists.cncf.io mailing list, you should: +1. Be an active distributor of Harbor. +2. Have a user base that is not limited to your own organization. +3. Have a publicly verifiable track record up to the present day of fixing security issues. +4. Not be a downstream or rebuild of another distributor. +5. Be a participant and active contributor in the Harbor community. +6. Accept the Embargo Policy that is outlined below. +7. Have someone who is already on the list vouch for the person requesting membership on behalf of your distribution. + +**The terms and conditions of the Embargo Policy apply to all members of this mailing list. A request for membership represents your acceptance to the terms and conditions of the Embargo Policy** + +### Embargo Policy +The information that members receive on cncf-harbor-distributors-announce@lists.cncf.io must not be made public, shared, or even hinted at anywhere beyond those who need to know within your specific team, unless you receive explicit approval to do so from the Harbor Security Team. This remains true until the public disclosure date/time agreed upon by the list. Members of the list and others cannot use the information for any reason other than to get the issue fixed for your respective distribution's users. +Before you share any information from the list with members of your team who are required to fix the issue, these team members must agree to the same terms, and only be provided with information on a need-to-know basis. 
+ +In the unfortunate event that you share information beyond what is permitted by this policy, you must urgently inform the cncf-harbor-security@lists.cncf.io mailing list of exactly what information was leaked and to whom. If you continue to leak information and break the policy outlined here, you will be permanently removed from the list. + +### Requesting to Join +Send new membership requests to cncf-harbor-security@lists.cncf.io. +In the body of your request please specify how you qualify for membership and fulfill each criterion listed in the Membership Criteria section above. + +## Confidentiality, integrity and availability +We consider vulnerabilities leading to the compromise of data confidentiality, elevation of privilege, or integrity to be our highest priority concerns. Availability, in particular in areas relating to DoS and resource exhaustion, is also a serious security concern. The Harbor Security Team takes all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities seriously and will investigate them in an urgent and expeditious manner. + +Note that we do not currently consider the default settings for Harbor to be secure-by-default. It is necessary for operators to explicitly configure settings, role based access control, and other resource related features in Harbor to provide a hardened Harbor environment. We will not act on any security disclosure that relates to a lack of safe defaults. Over time, we will work towards improved safe-by-default configuration, taking into account backwards compatibility. 
diff --git a/docs/customize_look&feel_guide.md b/docs/customize_look&feel_guide.md index 4c2db5dcd..bc967b6d0 100644 --- a/docs/customize_look&feel_guide.md +++ b/docs/customize_look&feel_guide.md @@ -10,11 +10,10 @@ Open the `setting.json` file, you'll see the default content as shown below: "headerBgColor": "#004a70", "headerLogo": "", "loginBgImg": "", + "appTitle": "", "product": { - "title": "Harbor", - "company": "goharbor", "name": "Harbor", - "introductions": { + "introduction": { "zh-cn": "", "es-es": "", "en-us": "" diff --git a/docs/img/cve-whitelist1.png b/docs/img/cve-whitelist1.png new file mode 100644 index 000000000..f345c4e88 Binary files /dev/null and b/docs/img/cve-whitelist1.png differ diff --git a/docs/img/cve-whitelist2.png b/docs/img/cve-whitelist2.png new file mode 100644 index 000000000..905768294 Binary files /dev/null and b/docs/img/cve-whitelist2.png differ diff --git a/docs/img/cve-whitelist3.png b/docs/img/cve-whitelist3.png new file mode 100644 index 000000000..524db9387 Binary files /dev/null and b/docs/img/cve-whitelist3.png differ diff --git a/docs/img/cve-whitelist4.png b/docs/img/cve-whitelist4.png new file mode 100644 index 000000000..972e84c98 Binary files /dev/null and b/docs/img/cve-whitelist4.png differ diff --git a/docs/img/cve-whitelist5.png b/docs/img/cve-whitelist5.png new file mode 100644 index 000000000..d9bc6c929 Binary files /dev/null and b/docs/img/cve-whitelist5.png differ diff --git a/docs/img/cve-whitelist6.png b/docs/img/cve-whitelist6.png new file mode 100644 index 000000000..1bbc149a8 Binary files /dev/null and b/docs/img/cve-whitelist6.png differ diff --git a/docs/img/onstar.png b/docs/img/onstar.png deleted file mode 100644 index d8cee6925..000000000 Binary files a/docs/img/onstar.png and /dev/null differ diff --git a/docs/img/project-quota1.png b/docs/img/project-quota1.png new file mode 100644 index 000000000..d0a9903dc Binary files /dev/null and b/docs/img/project-quota1.png differ diff --git 
a/docs/img/project-quota2.png b/docs/img/project-quota2.png new file mode 100644 index 000000000..af89b7f4f Binary files /dev/null and b/docs/img/project-quota2.png differ diff --git a/docs/img/project-quota3.png b/docs/img/project-quota3.png new file mode 100644 index 000000000..d9a6cafd3 Binary files /dev/null and b/docs/img/project-quota3.png differ diff --git a/docs/img/project-quota4.png b/docs/img/project-quota4.png new file mode 100644 index 000000000..44abc5c7d Binary files /dev/null and b/docs/img/project-quota4.png differ diff --git a/docs/img/project-quota5.png b/docs/img/project-quota5.png new file mode 100644 index 000000000..598cc42c2 Binary files /dev/null and b/docs/img/project-quota5.png differ diff --git a/docs/img/replication-endpoint1.png b/docs/img/replication-endpoint1.png new file mode 100644 index 000000000..5219658a3 Binary files /dev/null and b/docs/img/replication-endpoint1.png differ diff --git a/docs/img/replication-endpoint2.png b/docs/img/replication-endpoint2.png new file mode 100644 index 000000000..55458d59a Binary files /dev/null and b/docs/img/replication-endpoint2.png differ diff --git a/docs/img/robotaccount/add_robot_account_2.png b/docs/img/robotaccount/add_robot_account_2.png index 40945a989..951046edf 100644 Binary files a/docs/img/robotaccount/add_robot_account_2.png and b/docs/img/robotaccount/add_robot_account_2.png differ diff --git a/docs/img/tag-retention1.png b/docs/img/tag-retention1.png new file mode 100644 index 000000000..9990d7a30 Binary files /dev/null and b/docs/img/tag-retention1.png differ diff --git a/docs/img/tag-retention2.png b/docs/img/tag-retention2.png new file mode 100644 index 000000000..99368b576 Binary files /dev/null and b/docs/img/tag-retention2.png differ diff --git a/docs/img/tag-retention3.png b/docs/img/tag-retention3.png new file mode 100644 index 000000000..7cd798d79 Binary files /dev/null and b/docs/img/tag-retention3.png differ diff --git a/docs/img/tag-retention4.png 
b/docs/img/tag-retention4.png new file mode 100644 index 000000000..6f02f89ba Binary files /dev/null and b/docs/img/tag-retention4.png differ diff --git a/docs/img/tag-retention5.png b/docs/img/tag-retention5.png new file mode 100644 index 000000000..0b3f6f491 Binary files /dev/null and b/docs/img/tag-retention5.png differ diff --git a/docs/img/webhooks1.png b/docs/img/webhooks1.png new file mode 100644 index 000000000..28bd516f7 Binary files /dev/null and b/docs/img/webhooks1.png differ diff --git a/docs/img/webhooks2.png b/docs/img/webhooks2.png new file mode 100644 index 000000000..7c2498995 Binary files /dev/null and b/docs/img/webhooks2.png differ diff --git a/docs/img/webhooks3.png b/docs/img/webhooks3.png new file mode 100644 index 000000000..76d0472cb Binary files /dev/null and b/docs/img/webhooks3.png differ diff --git a/docs/img/webhooks4.png b/docs/img/webhooks4.png new file mode 100644 index 000000000..790dab3ec Binary files /dev/null and b/docs/img/webhooks4.png differ diff --git a/docs/installation_guide.md b/docs/installation_guide.md index 6bc190a71..d424464b5 100644 --- a/docs/installation_guide.md +++ b/docs/installation_guide.md @@ -100,19 +100,24 @@ The parameters are described below - note that at the very least, you will need - **harbor_admin_password**: The administrator's initial password. This password only takes effect for the first time Harbor launches. After that, this setting is ignored and the administrator's password should be set in the Portal. _Note that the default username/password are **admin/Harbor12345** ._ - - - **database**: the configs related to local database - - **password**: The root password for the PostgreSQL database used for **db_auth**. _Change this password for any production use!_ + - **password**: The root password for the PostgreSQL database. Change this password for any production use. + - **max_idle_conns**: The maximum number of connections in the idle connection pool. If <=0 no idle connections are retained. 
The default value is 50 and if it is not configured the value is 2. + - **max_open_conns**: The maximum number of open connections to the database. If <= 0 there is no limit on the number of open connections. The default value is 100 for the max connections to the Harbor database. If it is not configured the value is 0. - **jobservice**: jobservice related service - **max_job_workers**: The maximum number of replication workers in job service. For each image replication job, a worker synchronizes all tags of a repository to the remote destination. Increasing this number allows more concurrent replication jobs in the system. However, since each worker consumes a certain amount of network/CPU/IO resources, please carefully pick the value of this attribute based on the hardware resource of the host. - **log**: log related url - **level**: log level, options are debug, info, warning, error, fatal - - **rotate_count**: Log files are rotated **rotate_count** times before being removed. If count is 0, old versions are removed rather than rotated. - - **rotate_size**: Log files are rotated only if they grow bigger than **rotate_size** bytes. If size is followed by k, the size is assumed to be in kilobytes. If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G are all valid. - - **location**: the directory to store log - + - **local**: The default is to retain logs locally. + - **rotate_count**: Log files are rotated **rotate_count** times before being removed. If count is 0, old versions are removed rather than rotated. + - **rotate_size**: Log files are rotated only if they grow bigger than **rotate_size** bytes. If size is followed by k, the size is assumed to be in kilobytes. If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G are all valid. 
+ - **location**: the directory to store logs + - **external_endpoint**: Enable this option to forward logs to a syslog server. + - **protocol**: Transport protocol for the syslog server. Default is TCP. + - **host**: The URL of the syslog server. + - **port**: The port on which the syslog server listens. + ##### optional parameters - **http**: @@ -143,6 +148,8 @@ refer to **[Configuring Harbor with HTTPS Access](configure_https.md)**. - **username**: username to connect harbor core database - **password**: password to harbor core database - **ssl_mode**: is enable ssl mode + - **max_idle_conns**: The maximum number of connections in the idle connection pool. If <=0 no idle connections are retained. The default value is 2. + - **max_open_conns**: The maximum number of open connections to the database. If <= 0 there is no limit on the number of open connections. The default value is 0. - **clair**: clair's database configs - **host**: hostname for clair database - **port**: port of clair database diff --git a/docs/migration_guide.md b/docs/migration_guide.md index 1740bdb1a..747db652d 100644 --- a/docs/migration_guide.md +++ b/docs/migration_guide.md @@ -1,91 +1,99 @@ -# Harbor upgrade and migration guide +# Harbor Upgrade and Migration Guide -This guide only covers upgrade and migration to version >= v1.8.0 +This guide covers upgrade and migration to version 1.9.0. This guide only covers migration from v1.7.x and later to the current version. If you are upgrading from an earlier version, refer to the migration guide in the `release-1.7.0` branch to upgrade to v1.7.x first, then follow this guide to perform the migration to this version. -When upgrading your existing Harbor instance to a newer version, you may need to migrate the data in your database and the settings in `harbor.cfg`. -Since the migration may alter the database schema and the settings of `harbor.cfg`, you should **always** back up your data before any migration. 
+When upgrading an existing Harbor 1.7.x instance to a newer version, you might need to migrate the data in your database and the settings in `harbor.cfg`. +Since the migration might alter the database schema and the settings of `harbor.cfg`, you should **always** back up your data before any migration. -**NOTE:** +**NOTES:** - Again, you must back up your data before any data migration. +- Since v1.8.0, the configuration of Harbor has changed to a `.yml` file. If you are upgrading from 1.7.x, the migrator will transform the configuration file from `harbor.cfg` to `harbor.yml`. The command will be a little different to perform this migration, so make sure you follow the steps below. +- In version 1.9.0, some containers are started by `non-root`. This does not pose problems if you are upgrading an officially released version of Harbor, but if you have deployed a customized instance of Harbor, you might encounter permission issues. +- In previous releases, user roles took precedence over group roles in a project. In this version, user roles and group roles are combined so that the user has whichever set of permissions is highest. This might cause the roles of certain users to change during upgrade. +- With the introduction of storage and artifact quotas in version 1.9.0, migration from 1.7.x and 1.8.x might take a few minutes. This is because the `core` walks through all blobs in the registry and populates the database with information about the layers and artifacts in projects. +- With the introduction of storage and artifact quotas in version 1.9.0, replication between version 1.9.0 and a previous version of Harbor does not work. You must upgrade all Harbor nodes to 1.9.0 if you have configured replication between them. 
-- This guide only covers the migration from v1.6.0 to current version, if you are upgrading from earlier versions please -refer to the migration guide in release branch to upgrade to v1.6.0 and follow this guide to do the migration to later version. - -- From v1.6.0 on, Harbor will automatically try to do the migrate the DB schema when it starts, so if you are upgrading from v1.6.0 -or above it's not necessary to call the migrator tool to migrate the schema. - -- For the change in Database schema please refer to [change log](../tools/migration/db/changelog.md). - -- Since v1.8.0, the configuration of Harbor has changed to `.yml` file, the migrator will transform the configuration -file from `harbor.cfg` to `harbor.yml`. The command will be a little different to perform this migration, please make sure -you follow the steps below. - - -### Upgrading Harbor and migrating data +## Upgrading Harbor and Migrating Data 1. Log in to the host that Harbor runs on, stop and remove existing Harbor instance if it is still running: - ``` + + ```sh cd harbor docker-compose down ``` -2. Back up Harbor's current files so that you can roll back to the current version when it is necessary. - ``` +2. Back up Harbor's current files so that you can roll back to the current version if necessary. + + ```sh mv harbor /my_backup_dir/harbor ``` + Back up database (by default in directory `/data/database`) - ``` + + ```sh cp -r /data/database /my_backup_dir/ ``` 3. Get the latest Harbor release package from Github: - https://github.com/goharbor/harbor/releases + [https://github.com/goharbor/harbor/releases](https://github.com/goharbor/harbor/releases) -4. Before upgrading Harbor, perform migration first. The migration tool is delivered as a docker image, so you should pull the image from docker hub. Replace [tag] with the release version of Harbor (e.g. v1.5.0) in the below command: - ``` +4. Before upgrading Harbor, perform a migration first. 
The migration tool is delivered as a docker image, so you should pull the image from docker hub. Replace [tag] with the release version of Harbor (for example, v1.9.0) in the command below: + + ```sh docker pull goharbor/harbor-migrator:[tag] ``` -5. Upgrade from `harbor.cfg` to `harbor.yml` - **NOTE:** You can find the ${harbor_yml} in the extracted installer you got in step `3`, after the migration the file `harbor.yml` +5. If your current version is v1.7.x or earlier, migrate the config file from `harbor.cfg` to `harbor.yml`. + + **NOTE:** You can find the ${harbor_yml} in the extracted installer you got in step `3`, after the migration the file `harbor.yml` in that path will be updated with the values from ${harbor_cfg} - - ``` + + ```sh docker run -it --rm -v ${harbor_cfg}:/harbor-migration/harbor-cfg/harbor.yml -v ${harbor_yml}:/harbor-migration/harbor-cfg-out/harbor.yml goharbor/harbor-migrator:[tag] --cfg up ``` - **NOTE:** The schema upgrade and data migration of Database is performed by core when Harbor starts, if the migration fails, - please check the log of core to debug. -6. Under the directory `./harbor`, run the `./install.sh` script to install the new Harbor instance. If you choose to install Harbor with components like Notary, Clair, and chartmuseum, refer to [Installation & Configuration Guide](../docs/installation_guide.md) for more information. + Otherwise, if your version is 1.8.x or higher, just upgrade the `harbor.yml` file. + ```sh + docker run -it --rm -v ${harbor_yml}:/harbor-migration/harbor-cfg/harbor.yml goharbor/harbor-migrator:[tag] --cfg up + ``` -### Roll back from an upgrade -For any reason, if you want to roll back to the previous version of Harbor, follow the below steps: + **NOTE:** The schema upgrade and data migration of the database is performed by core when Harbor starts, if the migration fails, please check the log of core to debug. -**NOTE:** Roll back doesn't support upgrade across v1.5.0, like from v1.2.0 to v1.7.0. 
This is because Harbor changes DB to PostgreSQL from v1.7.0, the migrator cannot roll back data to MariaDB. +6. Under the directory `./harbor`, run the `./install.sh` script to install the new Harbor instance. If you choose to install Harbor with components such as Notary, Clair, and chartmuseum, refer to [Installation & Configuration Guide](../docs/installation_guide.md) for more information. + +## Roll Back from an Upgrade + +If, for any reason, you want to roll back to the previous version of Harbor, perform the following steps: 1. Stop and remove the current Harbor service if it is still running. - ``` + + ```sh cd harbor docker-compose down ``` - + 2. Remove current Harbor instance. - ``` + + ```sh rm -rf harbor ``` - + 3. Restore the older version package of Harbor. + ```sh mv /my_backup_dir/harbor harbor ``` - -4. Restore database, copy the data files from backup directory to you data volume, by default `/data/database`. + +4. Restore database, copy the data files from the backup directory to your data volume, by default `/data/database`. 5. Restart Harbor service using the previous configuration. If previous version of Harbor was installed by a release build: + ```sh cd harbor ./install.sh ``` + +**NOTE**: While you can roll back an upgrade to the state before you started the upgrade, Harbor does not support downgrades. diff --git a/docs/permissions.md b/docs/permissions.md index f732f0de6..2543ef3c3 100644 --- a/docs/permissions.md +++ b/docs/permissions.md @@ -43,3 +43,11 @@ The following table depicts the various user permission levels in a project. 
| Add/Remove labels of helm chart version | | ✓ | ✓ | ✓ | | See a list of project robots | | | ✓ | ✓ | | Create/edit/delete project robots | | | | ✓ | +| See configured CVE whitelist | ✓ | ✓ | ✓ | ✓ | +| Create/edit/remove CVE whitelist | | | | ✓ | +| Enable/disable webhooks | | ✓ | ✓ | ✓ | +| Create/delete tag retention rules | | ✓ | ✓ | ✓ | +| Enable/disable tag retention rules | | ✓ | ✓ | ✓ | +| See project quotas | ✓ | ✓ | ✓ | ✓ | +| Edit project quotas | | | | | + diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 687f92d15..e2a83c8ab 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -774,20 +774,15 @@ paths: description: Internal errors. /users/search: get: - summary: Search users by username, email + summary: Search users by username description: | - This endpoint is to search the users by username, email. + This endpoint is to search the users by username. parameters: - name: username in: query type: string - required: false + required: true description: Username for filtering results. - - name: email - in: query - type: string - required: false - description: Email for filtering results. - name: page in: query type: integer @@ -964,13 +959,13 @@ paths: description: User ID does not exist. '500': description: Unexpected internal errors. - '/users/{user_id}/gen_cli_secret': - post: - summary: Generate new CLI secret for a user. + '/users/{user_id}/cli_secret': + put: + summary: Set CLI secret for a user. description: | This endpoint let user generate a new CLI secret for himself. This API only works when auth mode is set to 'OIDC'. Once this API returns with successful status, the old secret will be invalid, as there will be only one CLI secret - for a user. The new secret will be returned in the response. + for a user. parameters: - name: user_id in: path @@ -978,19 +973,23 @@ paths: format: int required: true description: User ID - tags: - - Products - responses: - '200': - description: The secret is successfully generated. 
+ - name: input_secret + in: body + description: JSON object that includes the new secret + required: true schema: type: object properties: secret: type: string description: The new secret + tags: + - Products + responses: + '200': + description: The secret is successfully updated '400': - description: Invalid user ID. Or user is not onboarded via OIDC authentication. + description: Invalid user ID. Or user is not onboarded via OIDC authentication. Or the secret does not meet the standard. '401': description: User need to log in first. '403': @@ -2415,7 +2414,7 @@ paths: description: | This endpoint is for syncing quota usage of registry/chart with database. tags: - - Products + - Products responses: '200': description: Sync repositories successfully. @@ -2423,6 +2422,28 @@ paths: description: User need to log in first. '403': description: User does not have permission of system admin role. + /internal/switchquota: + put: + summary: Enable or disable quota. + description: | + This endpoint is for enable/disable quota. When quota is disabled, no resource require/release in image/chart push and delete. + tags: + - Products + parameters: + - name: switcher + in: body + required: true + schema: + $ref: '#/definitions/QuotaSwitcher' + responses: + '200': + description: Enable/Disable quota successfully. + '401': + description: User need to log in first. + '403': + description: User does not have permission of system admin role. + '500': + description: Unexpected internal errors. /systeminfo: get: summary: Get general system info @@ -3600,7 +3621,6 @@ paths: description: List quotas tags: - Products - - Quota parameters: - name: reference in: query @@ -3968,7 +3988,124 @@ paths: description: User have no permission to list webhook jobs of the project. '500': description: Unexpected internal errors. 
- + '/projects/{project_id}/immutabletagrules': + get: + summary: List all immutable tag rules of current project + description: | + This endpoint returns the immutable tag rules of a project + parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID. + tags: + - Products + responses: + '200': + description: List project immutable tag rules successfully. + schema: + type: array + items: + $ref: '#/definitions/ImmutableTagRule' + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to list immutable tag rules of the project. + '500': + description: Unexpected internal errors. + post: + summary: Add an immutable tag rule to current project + description: | + This endpoint adds an immutable tag rule to the project + parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID. + - name: immutabletagrule + in: body + schema: + $ref: '#/definitions/ImmutableTagRule' + tags: + - Products + responses: + '200': + description: Add the immutable tag rule successfully. + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to add immutable tag rules of the project. + '500': + description: Internal server errors. + '/projects/{project_id}/immutabletagrules/{id}': + put: + summary: Update the immutable tag rule or enable or disable the rule + parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID. + - name: id + in: path + type: integer + format: int64 + required: true + description: Immutable tag rule ID. 
+ - name: immutabletagrule + in: body + schema: + $ref: '#/definitions/ImmutableTagRule' + tags: + - Products + responses: + '200': + description: Update the immutable tag rule successfully. + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to update the immutable tag rule of the project. + '500': + description: Internal server errors. + delete: + summary: Delete the immutable tag rule. + parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID. + - name: id + in: path + type: integer + format: int64 + required: true + description: Immutable tag rule ID. + tags: + - Products + responses: + '200': + description: Delete the immutable tag rule successfully. + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to delete immutable tags of the project. + '500': + description: Internal server errors. '/retentions/metadatas': get: summary: Get Retention Metadatas @@ -6248,4 +6385,22 @@ definitions: type: integer retained: type: integer - + QuotaSwitcher: + type: object + properties: + enabled: + type: boolean + description: Whether the quota is enabled or disabled + ImmutableTagRule: + type: object + properties: + id: + type: integer + format: int64 + project_id: + type: integer + format: int64 + tag_filter: + type: string + enabled: + type: boolean diff --git a/docs/user_guide.md b/docs/user_guide.md index c584a07d5..42d08e7db 100644 --- a/docs/user_guide.md +++ b/docs/user_guide.md @@ -1,736 +1,1113 @@ -# User Guide -## Overview -This guide walks you through the fundamentals of using Harbor. 
You'll learn how to use Harbor to: - -* [Manage your projects.](#managing-projects) -* [Manage members of a project.](#managing-members-of-a-project) -* [Replicate resources between Harbor and non-Harbor registries.](#replicating-resources) -* [Retag images within Harbor](#retag-images) -* [Search projects and repositories.](#searching-projects-and-repositories) -* [Manage labels.](#managing-labels) -* [Manage Harbor system if you are the system administrator:](#administrator-options) - * [Manage users.](#managing-user) - * [Manage registries.](#managing-registry) - * [Manage replication rules.](#managing-replication) - * [Manage authentication.](#managing-authentication) - * [Manage project creation.](#managing-project-creation) - * [Manage self-registration.](#managing-self-registration) - * [Manage email settings.](#managing-email-settings) - * [Manage registry read only.](#managing-registry-read-only) - * [Manage role by LDAP group.](#managing-role-by-ldap-group) -* [Pull and push images using Docker client.](#pulling-and-pushing-images-using-docker-client) -* [Add description to repositories](#add-description-to-repositories) -* [Delete repositories and images.](#deleting-repositories) -* [Content trust. ](#content-trust) -* [Vulnerability scanning via Clair.](#vulnerability-scanning-via-clair) -* [Pull image from Harbor in Kubernetes.](#pull-image-from-harbor-in-kubernetes) -* [Manage Helm Charts](#manage-helm-charts) - * [Manage Helm Charts via portal](#manage-helm-charts-via-portal) - * [Working with Helm CLI](#working-with-helm-cli) -* [Online Garbage Collection.](#online-garbage-collection) -* [View build history.](#build-history) -* [Using CLI after login via OIDC based SSO](#using-oidc-cli-secret) -* [Manage robot account of a project.](#robot-account) -* [Using API Explorer](#api-explorer) - -## Role Based Access Control(RBAC) - -![rbac](img/rbac.png) - -Harbor manages images through projects. 
Users can be added into one project as a member with one of three different roles: - -* **Guest**: Guest has read-only privilege for a specified project. -* **Developer**: Developer has read and write privileges for a project. -* **Master**: Master has elevated permissions beyond those of 'Developer' including the ability to scan images, view replications jobs, and delete images and helm charts. -* **ProjectAdmin**: When creating a new project, you will be assigned the "ProjectAdmin" role to the project. Besides read-write privileges, the "ProjectAdmin" also has some management privileges, such as adding and removing members, starting a vulnerability scan. - -Besides the above three roles, there are two system-level roles: - -* **SysAdmin**: "SysAdmin" has the most privileges. In addition to the privileges mentioned above, "SysAdmin" can also list all projects, set an ordinary user as administrator, delete users and set vulnerability scan policy for all images. The public project "library" is also owned by the administrator. -* **Anonymous**: When a user is not logged in, the user is considered as an "Anonymous" user. An anonymous user has no access to private projects and has read-only access to public projects. - -See detailed permissions matrix listed here: https://github.com/goharbor/harbor/blob/master/docs/permissions.md - -## User account -Harbor supports different authentication modes: - -* **Database(db_auth)** - - Users are stored in the local database. - - A user can register himself/herself in Harbor in this mode. To disable user self-registration, refer to the [installation guide](installation_guide.md) for initial configuration, or disable this feature in [Administrator Options](#administrator-options). When self-registration is disabled, the system administrator can add users into Harbor. - - When registering or adding a new user, the username and email must be unique in the Harbor system. 
The password must contain at least 8 characters with 1 lowercase letter, 1 uppercase letter and 1 numeric character. - - When you forgot your password, you can follow the below steps to reset the password: - - 1. Click the link "Forgot Password" in the sign in page. - 2. Input the email address entered when you signed up, an email will be sent out to you for password reset. - 3. After receiving the email, click on the link in the email which directs you to a password reset web page. - 4. Input your new password and click "Save". - -* **LDAP/Active Directory (ldap_auth)** - - Under this authentication mode, users whose credentials are stored in an external LDAP or AD server can log in to Harbor directly. - - When an LDAP/AD user logs in by *username* and *password*, Harbor binds to the LDAP/AD server with the **"LDAP Search DN"** and **"LDAP Search Password"** described in [installation guide](installation_guide.md). If it succeeded, Harbor looks up the user under the LDAP entry **"LDAP Base DN"** including substree. The attribute (such as uid, cn) specified by **"LDAP UID"** is used to match a user with the *username*. If a match is found, the user's *password* is verified by a bind request to the LDAP/AD server. Uncheck **"LDAP Verify Cert"** if the LDAP/AD server uses a self-signed or an untrusted certificate. - - Self-registration, deleting user, changing password and resetting password are not supported under LDAP/AD authentication mode because the users are managed by LDAP or AD. - -* **OIDC Provider (oidc_auth)** - - With this authentication mode, regular user will login to Harbor Portal via SSO flow. - After the system administrator configure Harbor to authenticate via OIDC (more details refer to [this section](#managing-authentication)), - a button `LOGIN VIA OIDC PROVIDER` will appear on the login page. - ![oidc_login](img/oidc_login.png) - - By clicking this button user will kick off the SSO flow and be redirected to the OIDC Provider for authentication. 
After a successful - authentication at the remote site, user will be redirected to Harbor. There will be an "onboard" step if it's the first time the user - authenticate using his account, in which there will be a dialog popped up for him to set his user name in Harbor: - ![oidc_onboar](img/oidc_onboard_dlg.png) - - This user name will be the identifier for this user in Harbor, which will be used in the cases such as adding member to a project, assigning roles, etc. - This has to be a unique user name, if another user has used this user name to onboard, user will be prompted to choose another one. - - Regarding this user to use docker CLI, please refer to [Using CLI after login via OIDC based SSO](#using-oidc-cli-secret) - - **NOTE:** - 1. After the onboard process, you still have to login to Harbor via SSO flow, the `Username` and `Password` fields are only for - local admin to login when Harbor is configured authentication via OIDC. - 2. Similar to LDAP authentication mode, self-registration, updating profile, deleting user, changing password and - resetting password are not supported. - - -## Managing projects -A project in Harbor contains all repositories of an application. No images can be pushed to Harbor before the project is created. RBAC is applied to a project. There are two types of projects in Harbor: - -* **Public**: All users have the read privilege to a public project, it's convenient for you to share some repositories with others in this way. -* **Private**: A private project can only be accessed by users with proper privileges. - -You can create a project after you signed in. Check on the "Access Level" checkbox will make this project public. - -![create project](img/new_create_project.png) - -After the project is created, you can browse repositories, members, logs, replication and configuration using the navigation tab. 
- -![browse project](img/new_browse_project.png) - -There are two views to show repositories, list view and card view, you can switch between them by clicking the corresponding icon. - -![browse repositories](img/browse_project_repositories.png) - -All logs can be listed by clicking "Logs". You can apply a filter by username, or operations and dates under "Advanced Search". - -![browse project](img/log_search_advanced.png) - -![browse project](img/new_project_log.png) - -Project properties can be changed by clicking "Configuration". - -* To make all repositories under the project accessible to everyone, select the `Public` checkbox. - -* To prevent un-signed images under the project from being pulled, select the `Enable content trust` checkbox. - -* To prevent vulnerable images under the project from being pulled, select the `Prevent vulnerable images from running` checkbox and change the severity level of vulnerabilities. Images cannot be pulled if their level equals to or higher than the currently selected level. - -* To activate an immediate vulnerability scan on new images that are pushed to the project, select the `Automatically scan images on push` checkbox. - -![browse project](img/project_configuration.png) - -## Managing members of a project -### Adding members -You can add members with different roles to an existing project. You can add a LDAP/AD user to project members under LDAP/AD authentication mode. - -![browse project](img/new_add_member.png) - -### Updating and removing members -You can check one or more members, then click `ACTION`, choose one role to batch switch checked members' roles or remove them from the project. - -![browse project](img/new_remove_update_member.png) - -## Replicating resources -Replication allows users to replicate resources(images/charts) between Harbor and non-Harbor registries in both pull or push mode. 
Currently, the non-Harbor registries includes Docker Hub, Docker registry, Huawei SWR, and more registries will be supported in future. - -Once the system administrator has set a rule, all resources that match the defined [filter](#resource-filter) patterns will be replicated to the destination registry when the [triggering condition](#trigger-mode) is matched. Each resource will start a task to run. If the namespace does not exist on the destination registry, a new namespace will be created automatically. If it already exists and the user configured in policy has no write privilege to it, the process will fail. The member information will not be replicated. - -There may be a bit of delay during replication based on the situation of the network. If replication task fails, it will be re-scheduled a few minutes later and try 3 times. - -**Note:** Due to API changes, replication between different versions of Harbor may be broken. - -### Creating a replication rule -Login as a system administrator user, click `NEW REPLICATION RULE` under `Administration->Replications` and fill in the necessary fields. You can choose different replication modes, [resource filters](#resource-filter) and [trigger modes](#trigger-mode) according to the different requirements. If there is no endpoint available in the list, follow the instructions in the [Installation Guide](installation_guide.md) to create one. Click `SAVE` to create a replication rule. - -![browse project](img/create_rule.png) - -#### Resource filter -Three resource filters are supported: -* **Name**: Filter resources according to the name. -* **Tag**: Filter resources according to the tag. -* **Resource**: Filter images according to the resource type. - -The terms supported in the pattern used by name filter and tag filter are as follows: -* **\***: Matches any sequence of non-separator characters `/`. -* **\*\***: Matches any sequence of characters, including path separators `/`. 
-* **?**: Matches any single non-separator character `/`. -* **{alt1,...}**: Matches a sequence of characters if one of the comma-separated alternatives matches. - -**Note:** `library` must be added if you want to replicate the official images of Docker Hub. For example, `library/hello-world` matches the official hello-world images. - -Pattern | String(Match or not) ----------- | ------- -`library/*` | `library/hello-world`(Y)
`library/my/hello-world`(N) -`library/**` | `library/hello-world`(Y)
`library/my/hello-world`(Y) -`{library,goharbor}/**` | `library/hello-world`(Y)
`goharbor/harbor-core`(Y)
`google/hello-world`(N) -`1.?` | `1.0`(Y)
`1.01`(N) - -#### Trigger mode -* **Manual**: Replicate the resources manually when needed. **Note**: The deletion operations are not replicated. -* **Scheduled**: Replicate the resources periodically. **Note**: The deletion operations are not replicated. -* **Event Based**: When a new resource is pushed to the project, it is replicated to the remote registry immediately. Same to the deletion operation if the `Delete remote resources when locally deleted` checkbox is selected. - -### Starting a replication manually -Select a replication rule and click `REPLICATE`, the resources which the rule is applied to will be replicated from the source registry to the destination immediately. - -![browse project](img/start_replicate.png) - -### Listing and stopping replication executions -Click a rule, the execution records which belong to this rule will be listed. Each record represents the summary of one execution of the rule. Click `STOP` to stop the executions which are in progress. - -![browse project](img/list_stop_executions.png) - -### Listing tasks -Click the ID of one execution, you can get the execution summary and the task list. Click the log icon can get the detail information for the replication progress. -**Note**: The count of `IN PROGRESS` status in the summary includes both `Pending` and `In Progress` tasks. - -![browse project](img/list_tasks.png) - -### Deleting the replication rule -Select the replication rule and click `DELETE` to delete it. Only rules which have no in progress executions can be deleted. - -![browse project](img/delete_rule.png) - -## Retag Images - -Images retag helps users to tag images in Harbor, images can be tagged to different repositories and projects, as long as the users have sufficient permissions. 
For example, - -``` -release/app:stg --> release/app:prd -develop/app:v1.0 --> release/app:v1.0 -``` -To retag an image, users should have read permission (guest role or above) to the source project and write permission (developer role or above) to the target project. - -In Harbor portal, select the image you'd like to retag, and click the enabled `Retag` button to open the retag dialog. - -![retag image](img/retag_image.png) - -In the retag dialog, project name, repository name and the new tag should be specified. On click the `CONFIRM` button, the new tag would be created instantly. You can check the new tag in the corresponding project. - -## Searching projects and repositories -Entering a keyword in the search field at the top lists all matching projects and repositories. The search result includes both public and private repositories you have access to. - -![browse project](img/new_search.png) - -## Managing labels -Harbor provides two kinds of labels to isolate kinds of resources(only images for now): -* **Global Level Label**: Managed by system administrators and used to manage the images of the whole system. They can be added to images under any projects. -* **Project Level Label**: Managed by project administrators under a project and can only be added to the images of the project. 
- -### Managing global level labels -The system administrators can list, create, update and delete the global level labels under `Administration->Configuration->Labels`: - -![manage global level labels](img/manage_global_level_labels.png) - -### Managing project level labels -The project administrators and system administrators can list, create, update and delete the project level labels under `Labels` tab of the project detail page: - -![manage project level labels](img/manage_project_level_labels.png) - -### Adding labels to/remove labels from images -Users who have system administrator, project administrator or project developer role can click the `ADD LABELS` button to add labels to or remove labels from images. The label list contains both globel level labels(come first) and project level labels: - -![add labels to images](img/add_labels_to_images.png) - -### Filtering images by labels -The images can be filtered by labels: - -![filter images by labels](img/filter_images_by_label.png) - -## Administrator options -### Managing user -Administrator can add "Administrator" role to one or more ordinary users by checking checkboxes and clicking `SET AS ADMINISTRATOR`. To delete users, checked checkboxes and select `DELETE`. Deleting user is only supported under database authentication mode. - -![browse project](img/new_set_admin_remove_user.png) - -### Managing registry -You can list, add, edit and delete registries under `Administration->Registries`. Only registries which are not referenced by any rules can be deleted. - -![browse project](img/manage_registry.png) - -### Managing replication -You can list, add, edit and delete rules under `Administration->Replications`. - -![browse project](img/manage_replication.png) - -### Managing authentication -You can change authentication mode between **Database**(default) and **LDAP** before any user is added, when there is at least one user(besides admin) in Harbor, you cannot change the authentication mode. 
-![browse project](img/new_auth.png) -When using LDAP mode, user's self-registration is disabled. The parameters of LDAP server must be filled in. For more information, refer to [User account](#user-account). -![browse project](img/ldap_auth.png) - -When using OIDC mode, user will login Harbor via OIDC based SSO. A client has to be registered on the OIDC provider and Harbor's callback URI needs to be associated to that client as a redirectURI. -![OIDC settings](img/oidc_auth_setting.png) - -The settings of this auth mode: -* OIDC Provider Name: The name of the OIDC Provider. -* OIDC Provider Endpoint: The URL of the endpoint of the OIDC provider(a.k.a the Authorization Server in OAuth's terminology), -which must service the "well-known" URI for its configuration, more details please refer to https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest -* OIDC Client ID: The ID of client configured on OIDC Provider. -* OIDC Client Secret: The secret for this client. -* OIDC Scope: The scope values to be used during the authentication. It is the comma separated string, which must contain `openid`. -Normally it should also contain `profile` and `email`. For getting the refresh token it should also contain `offline_access`. Please check with the administrator of the OIDC Provider. -* Verify Certificate: Whether to check the certificate when accessing the OIDC Provider. if you are running the OIDC Provider with self-signed -certificate, make sure this value is set to false. - - -### Managing project creation -Use the **Project Creation** drop-down menu to set which users can create projects. Select **Everyone** to allow all users to create projects. Select **Admin Only** to allow only users with the Administrator role to create projects. -![browse project](img/new_proj_create.png) - -### Managing self-registration -You can manage whether a user can sign up for a new account. This option is not available if you use LDAP authentication. 
-![browse project](img/new_self_reg.png) - -### Managing email settings -You can change Harbor's email settings, the mail server is used to send out responses to users who request to reset their password. -![browse project](img/new_config_email.png) - -### Managing registry read only -You can change Harbor's registry read only settings, read only mode will allow 'docker pull' while preventing 'docker push' and the deletion of repository and tag. -![browse project](img/read_only.png) - -If it set to true, deleting repository, tag and pushing image will be disabled. -![browse project](img/read_only_enable.png) - - -``` -$ docker push 10.117.169.182/demo/ubuntu:14.04 -The push refers to a repository [10.117.169.182/demo/ubuntu] -0271b8eebde3: Preparing -denied: The system is in read only mode. Any modification is prohibited. -``` -### Managing role by LDAP group - -If auth_mode is ldap_auth, you can manage project role by LDAP/AD group. please refer [manage role by ldap group guide](manage_role_by_ldap_group.md). - -## Pulling and pushing images using Docker client - -**NOTE: Harbor only supports Registry V2 API. You need to use Docker client 1.6.0 or higher.** - -Harbor supports HTTP by default and Docker client tries to connect to Harbor using HTTPS first, so if you encounter an error as below when you pull or push images, you need to configure insecure registry. Please, read [this document](https://docs.docker.com/registry/insecure/) in order to understand how to do this. - - -```Error response from daemon: Get https://myregistrydomain.com/v1/users/: dial tcp myregistrydomain.com:443 getsockopt: connection refused.``` - -If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add -`--insecure-registry myregistrydomain.com` to the daemon's start up arguments. - - -In the case of HTTPS, if you have access to the registry's CA certificate, simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com/ca.crt . 
- -### Pulling images -If the project that the image belongs to is private, you should sign in first: - -```sh -$ docker login 10.117.169.182 -``` - -You can now pull the image: - -```sh -$ docker pull 10.117.169.182/library/ubuntu:14.04 -``` - -**Note: Replace "10.117.169.182" with the IP address or domain name of your Harbor node. You cannot pull a unsigned image if you enabled content trust.** - -### Pushing images -Before pushing an image, you must create a corresponding project on Harbor web UI. - -First, log in from Docker client: - -```sh -$ docker login 10.117.169.182 -``` - -Tag the image: - -```sh -$ docker tag ubuntu:14.04 10.117.169.182/demo/ubuntu:14.04 -``` - -Push the image: - -```sh -$ docker push 10.117.169.182/demo/ubuntu:14.04 -``` - -**Note: Replace "10.117.169.182" with the IP address or domain name of your Harbor node.** - -### Add description to repositories - -After pushing an image, an Information can be added by project admin to describe this repository. - -Go into the repository and select the "Info" tab, and click the "EDIT" button. An textarea will appear and enter description here. Click "SAVE" button to save this information. - -![edit info](img/edit_description.png) - -### Download the harbor certs - -Users can click the "registry certificate" link to download the registry certificate. - -![browse project](img/download_harbor_certs.png) - -### Deleting repositories - -Repository deletion runs in two steps. - -First, delete a repository in Harbor's UI. This is soft deletion. You can delete the entire repository or just a tag of it. After the soft deletion, -the repository is no longer managed in Harbor, however, the files of the repository still remain in Harbor's storage. - -![browse project](img/new_delete_repo.png) -![browse project](img/new_delete_tag.png) - -**CAUTION: If both tag A and tag B refer to the same image, after deleting tag A, B will also get deleted. 
if you enabled content trust, you need to use notary command line tool to delete the tag's signature before you delete an image.** - -Next, delete the actual files of the repository using the [garbage collection](#online-garbage-collection) in Harbor's UI. - -### Content trust -**NOTE: Notary is an optional component, please make sure you have already installed it in your Harbor instance before you go through this section.** -If you want to enable content trust to ensure that images are signed, please set two environment variables in the command line before pushing or pulling any image: -```sh -export DOCKER_CONTENT_TRUST=1 -export DOCKER_CONTENT_TRUST_SERVER=https://10.117.169.182:4443 -``` -If you push the image for the first time, You will be asked to enter the root key passphrase. This will be needed every time you push a new image while the ``DOCKER_CONTENT_TRUST`` flag is set. -The root key is generated at: ``/root/.docker/trust/private/root_keys`` -You will also be asked to enter a new passphrase for the image. This is generated at ``/root/.docker/trust/private/tuf_keys/[registry name] /[imagepath]``. -If you are using a self-signed cert, make sure to copy the CA cert into ```/etc/docker/certs.d/10.117.169.182``` and ```$HOME/.docker/tls/10.117.169.182:4443/```. When an image is signed, it is indicated in the Web UI. -**Note: Replace "10.117.169.182" with the IP address or domain name of your Harbor node. In order to use content trust, HTTPS must be enabled in Harbor.** - - -When an image is signed, it has a tick shown in UI; otherwise, a cross sign(X) is displayed instead. -![browse project](img/content_trust.png) - -### Vulnerability scanning via Clair -**CAUTION: Clair is an optional component, please make sure you have already installed it in your Harbor instance before you go through this section.** - -Static analysis of vulnerabilities is provided through open source project [Clair](https://github.com/coreos/clair). 
You can initiate scanning on a particular image, or on all images in Harbor. Additionally, you can also set a policy to scan all the images at a specified time everyday. - -**Vulnerability metadata** - -Clair depends on the vulnerability metadata to complete the analysis process. After the first initial installation, Clair will automatically start to update the metadata database from different vulnerability repositories. The updating process may take a while based on the data size and network connection. If the database has not been fully populated, there is a warning message at the footer of the repository datagrid view. -![browse project](img/clair_not_ready.png) - -The 'database not fully ready' warning message is also displayed in the **'Vulnerability'** tab of **'Configuration'** section under **'Administration'** for your awareness. -![browse project](img/clair_not_ready2.png) - -Once the database is ready, an overall database updated timestamp will be shown in the **'Vulnerability'** tab of **'Configuration'** section under **'Administration'**. -![browse project](img/clair_ready.png) - -**Scanning an image** - -Enter your project, select the repository. For each tag there will be an 'Vulnerability' column to display vulnerability scanning status and related information. You can select the image and click the "SCAN" button to trigger the vulnerability scan process. -![browse project](img/scan_image.png) -**NOTES: Only the users with 'Project Admin' role have the privilege to launch the analysis process.** - -The analysis process may have the following status that are indicated in the 'Vulnerability' column: -* **Not Scanned:** The tag has never been scanned. -* **Queued:** The scanning task is scheduled but not executed yet. -* **Scanning:** The scanning process is in progress. -* **Error:** The scanning process failed to complete. -* **Complete:** The scanning process was successfully completed. 
- -For the **'Not Scanned'** and **'Queued'** statuses, a text label with status information is shown. For the **'Scanning'**, a progress bar will be displayed. -If an error occurred, you can click on the **'View Log'** link to view the related logs. -![browse project](img/log_viewer.png) - -If the process was successfully completed, a result bar is created. The width of the different colored sections indicates the percentage of features with vulnerabilities for a particular severity level. -* **Red:** **High** level of vulnerabilities -* **Orange:** **Medium** level of vulnerabilities -* **Yellow:** **Low** level of vulnerabilities -* **Grey:** **Unknown** level of vulnerabilities -* **Green:** **No** vulnerabilities -![browse project](img/bar_chart.png) - -Move the cursor over the bar, a tooltip with summary report will be displayed. Besides showing the total number of features with vulnerabilities and the total number of features in the scanned image tag, the report also lists the counts of features with vulnerabilities of different severity levels. The completion time of the last analysis process is shown at the bottom of the tooltip. -![browse project](img/summary_tooltip.png) - -Click on the tag name link, the detail page will be opened. Besides the information about the tag, all the vulnerabilities found in the last analysis process will be listed with the related information. You can order or filter the list by columns. -![browse project](img/tag_detail.png) - -**NOTES: You can initiate the vulnerability analysis for a tag at anytime you want as long as the status is not 'Queued' or 'Scanning'.** - -**Scanning all images** - -In the **'Vulnerability'** tab of **'Configuration'** section under **'Administration'**, click on the **'SCAN NOW'** button to start the analysis process for all the existing images. - -**NOTES: The scanning process is executed via multiple concurrent asynchronous tasks. 
There is no guarantee on the order of scanning or the returned results.** -![browse project](img/scan_all.png) - -To avoid frequently triggering the resource intensive scanning process, the availability of the button is restricted. It can be only triggered once in a predefined period. The next available time will be displayed besides the button. -![browse project](img/scan_all2.png) - -**Scheduled Scan by Policy** - -You can set policies to control the vulnerability analysis process. Currently, two options are available: -* **None:** No policy is selected. -* **Daily:** Policy is activated daily. It means an analysis job is scheduled to be executed at the specified time everyday. The scheduled job will scan all the images in Harbor. -![browse project](img/scan_policy.png) - -**NOTES: Once the scheduled job is executed, the completion time of scanning all images will be updated accordingly. Please be aware that the completion time of the images may be different because the execution of analysis for each image may be carried out at different time.** - -### Pull image from Harbor in Kubernetes -Kubernetes users can easily deploy pods with images stored in Harbor. The settings are similar to that of another private registry. There are two major issues: - -1. When your Harbor instance is hosting http and the certificate is self signed. You need to modify daemon.json on each work node of your cluster, for details please refer to: https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry -2. If your pod references an image under private project, you need to create a secret with the credentials of user who has permission to pull image from this project, for details refer to: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - -## Manage Helm Charts -[Helm](https://helm.sh) is a package manager for [Kubernetes](https://kubernetes.io). Helm uses a packaging format called [charts](https://docs.helm.sh/developing_charts). 
Since version 1.6.0 Harbor is now a composite cloud-native registry which supports both container image management and Helm charts management. Access to Helm charts in Harbor is controlled by [role-based access controls (RBAC)](https://en.wikipedia.org/wiki/Role-based_access_control) and is restricted by projects. - -### Manage Helm Charts via portal -#### List charts -Click your project to enter the project detail page after successful logging in. The existing helm charts will be listed under the tab `Helm Charts` which is beside the image `Repositories` tab with the following information: -* Name of helm chart -* The status of the chart: Active or Deprecated -* The count of chart versions -* The created time of the chart - -![list charts](img/chartrepo/list_charts.png) - -You can click the icon buttons on the top right to switch views between card view and list view. - -#### Upload new chart -Click the `UPLOAD` button on the top left to open the chart uploading dialog. Choose the uploading chart from your filesystem. Click the `UPLOAD` button to upload it to the chart repository server. - -![upload charts](img/chartrepo/upload_charts.png) - -If the chart is signed, you can choose the corresponding provenance file from your filesystem and Click the `UPLOAD` button to upload them together at once. - -If the chart is successfully uploaded, it will be displayed in the chart list at once. - -#### List chart versions -Clicking the chart name from the chart list will show all the available versions of that chart with the following information: -* the chart version number -* the maintainers of the chart version -* the template engine used (default is gotpl) -* the created timestamp of the chart version - -![list charts versions](img/chartrepo/list_chart_versions.png) - -Obviously, there will be at least 1 version for each of the charts in the top chart list. 
Same with chart list view, you can also click the icon buttons on the top right to switch views between card view and list view. - -Check the checkbox at the 1st column to select the specified chart versions: -* Click the `DELETE` button to delete all the selected chart versions from the chart repository server. Batch operation is supported. -* Click the `DOWNLOAD` button to download the chart artifact file. Batch operation is not supported. -* Click the `UPLOAD` button to upload the new chart version for the current chart - -#### Adding labels to/remove labels from chart versions -Users who have system administrator, project administrator or project developer role can click the `ADD LABELS` button to add labels to or remove labels from chart versions. - -![add labels to chart versions](img/chartrepo/add_labesl_to_chart_versions.png) - - -#### Filtering chart versions by labels -The chart versions can be filtered by labels: - -![filter chart versions by labels](img/chartrepo/filter_chart_versions_by_label.png) - -#### View chart version details -Clicking the chart version number link will open the chart version details view. You can see more details about the specified chart version here. There are three content sections: -* **Summary:** - * readme of the chart - * overall metadata like home, created timestamp and application version - * related helm commands for reference, such as `helm add repo` and `helm install` etc. -![chart details](img/chartrepo/chart_details.png) -* **Dependencies:** - * list all the dependant sun charts with 'name', 'version' and 'repository' fields -![chart dependencies](img/chartrepo/chart_dependencies.png) -* **Values:** - * display the content from `values.yaml` file with highlight code preview - * clicking the icon buttons on the top right to switch the yaml file view to k-v value pair list view -![chart values](img/chartrepo/chart_values.png) - -Clicking the `DOWNLOAD` button on the top right will start the downloading process. 
- -### Working with Helm CLI -As a helm chart repository, Harbor can work smoothly with Helm CLI. About how to install Helm CLI, please refer [install helm](https://docs.helm.sh/using_helm/#installing-helm). Run command `helm version` to make sure the version of Helm CLI is v2.9.1+. -``` -helm version - -#Client: &version.Version{SemVer:"v2.9.1", GitCommit:"20adb27c7c5868466912eebdf6664e7390ebe710", GitTreeState:"clean"} -#Server: &version.Version{SemVer:"v2.9.1", GitCommit:"20adb27c7c5868466912eebdf6664e7390ebe710", GitTreeState:"clean"} -``` -#### Add harbor to the repository list -Before working, Harbor should be added into the repository list with `helm repo add` command. Two different modes are supported. -* Add Harbor as a unified single index entry point - -With this mode Helm can be made aware of all the charts located in different projects and which are accessible by the currently authenticated user. -``` -helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo -``` -**NOTES:** Providing both ca file and cert files is caused by an issue from helm. - -* Add Harbor project as separate index entry point - -With this mode, helm can only pull charts in the specified project. -``` -helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject -``` - -#### Push charts to the repository server by CLI -As an alternative, you can also upload charts via the CLI. It is not supported by the native helm CLI. A plugin from the community should be installed before pushing. Run `helm plugin install` to install the `push` plugin first. 
-``` -helm plugin install https://github.com/chartmuseum/helm-push -``` -After a successful installation, run `push` command to upload your charts: -``` -helm push --ca-file=ca.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo -``` -**NOTES:** `push` command does not support pushing a prov file of a signed chart yet. - -#### Install charts -Before installing, make sure your helm is correctly initialized with command `helm init` and the chart index is synchronized with command `helm repo update`. - -Search the chart with the keyword if you're not sure where it is: -``` -helm search hello - -#NAME CHART VERSION APP VERSION DESCRIPTION -#local/hello-helm 0.3.10 1.3 A Helm chart for Kubernetes -#myrepo/chart_repo/hello-helm 0.1.10 1.2 A Helm chart for Kubernetes -#myrepo/library/hello-helm 0.3.10 1.3 A Helm chart for Kubernetes -``` -Everything is ready, install the chart to your kubernetes: -``` -helm install --ca-file=ca.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm -``` - -For other more helm commands like how to sign a chart, please refer to the [helm doc](https://docs.helm.sh/helm/#helm). - -## Online Garbage Collection -Online Garbage Collection enables user to trigger docker registry garbage collection by clicking button on UI. - -**NOTES:** The space is not freed when the images are deleted from Harbor, Garbage Collection is the task to free up the space by removing blobs from the filesystem when they are no longer referenced by a manifest. - -For more information about Garbage Collection, please see [Garbage Collection](https://github.com/docker/docker.github.io/blob/master/registry/garbage-collection.md). - -### Setting up Garbage Collection -If you are a system admin, you can trigger garbage collection by clicking "GC Now" in the **'Garbage Collection'** tab of **'Configuration'** section under **'Administration'**. 
- -![browse project](img/gc_now.png) -**NOTES:** Harbor is put into read-only mode when to execute Garbage Collection, and any modification on docker registry is prohibited. - -To avoid frequently triggering the garbage collection process, the availability of the button is restricted. It can be only triggered once in one minute. -![browse project](img/gc_now2.png) - -**Scheduled Garbage Collection by Policy** -* **None:** No policy is selected. -* **Daily:** Policy is activated daily. It means an analysis job is scheduled to be executed at the specified time everyday. The scheduled job will do garbage collection in Harbor. -* **Weekly:** Policy is activated weekly. It means an analysis job is scheduled to be executed at the specified time every week. The scheduled job will do garbage collection in Harbor. -Once the policy has been configured, you have the option to save the schedule. -![browse project](img/gc_policy.png) - -### Garbage Collection history -If you are a system admin, you can view the latest 10 records of garbage collection execution. -![browse project](img/gc_history.png) - -You can click on the 'details' link to view the related logs. -![browse project](img/gc_details.png) - -## Build history - -Build history make it easy to see the contents of a container image, find the code which bulids an image, or locate the image for a source repository. - -In Harbor portal, enter your project, select the repository, click on the link of tag name you'd like to see its build history, the detail page will be opened. Then switch to `Build History` tab, you can see the build history information. - -![build_ history](img/build_history.png) - -## Using OIDC CLI secret - -Having authenticated via OIDC SSO and onboarded to Harbor, you can use Docker/Helm CLI to access Harbor to read/write the artifacts. -As the CLI cannot handle redirection for SSO, we introduced `CLI secret`, which is only available when Harbor's authentication mode -is configured to OIDC based. 
-After logging into Harbor, click the drop down list to view user's profile: -![user_profile](img/user_profile.png) - -You can copy your CLI secret via the dialog of profile: -![profile_dlg](img/profile_dlg.png) - -After that you can authenticate using your user name in Harbor that you set during onboard process, and CLI secret as the password -with Docker/Helm CLI, for example: -```sh -docker login -u testuser -p xxxxxx jt-test.local.goharbor.io - -``` - -When you click the "..." icon in the profile dialog, a button for generating new CLI secret will appear, and you can generate a new -CLI secret by clicking this button. Please be reminded one user can only have one CLI secret, so when a new secret is generated, the -old one becomes invalid at once. - -**NOTE**: -Under the hood the CLI secret is associated with the ID token, and Harbor will try to refresh the token, so the CLI secret will -be valid after th ID token expires. However, if the OIDC Provider does not provide refresh token or the refresh fails for some -reason, the CLI secret will become invalid. In that case you can logout and login Harbor via SSO flow again so Harbor can get a -new ID token and the CLI secret will work again. - - -## Robot Account -Robot Accounts are accounts created by project admins that are intended for automated operations. They have the following limitations: - -1, Robot Accounts cannot login Harbor portal -2, Robot Accounts can only perform `docker push`/`docker pull` operations with a token. - -### Add a Robot Account -If you are a project admin, you can create a Robot Account by clicking "New Robot Account" in the `Robot Accounts` tab of a project, and enter a name, a description and permission. -![add_robot_account](img/robotaccount/add_robot_account.png) - -![add_robot_account](img/robotaccount/add_robot_account_2.png) - -> **NOTE:** The name will become `robot$` and will be used to distinguish a robot account from a normal harbor user. 
- -![copy_robot_account_token](img/robotaccount/copy_robot_account_token.png) -As Harbor doesn't store your account token, please make sure to copy it in the pop up dialog after creating, otherwise, there is no way to get it from Harbor. - -### Configure duration of robot account -If you are a system admin, you can configure the robot account token duration in days. -![set_robot_account_token_duration](img/robotaccount/set_robot_account_token_duration.png) - -### Authenticate with a robot account -To authenticate with a Robot Account, use `docker login` as below, - -``` -docker login harbor.io -Username: robot$accountname -Password: Thepasswordgeneratedbyprojectadmin -``` - -### Disable a robot account -If you are a project admin, you can disable a Robot Account by clicking "Disable Account" in the `Robot Accounts` tab of a project. -![disable_robot_account](img/robotaccount/disable_delete_robot_account.png) - -### Delete a robot account -If you are a project admin, you can delete a Robot Account by clicking "Delete" in the `Robot Accounts` tab of a project. -![delete_robot_account](img/robotaccount/disable_delete_robot_account.png) - -## API Explorer - -Harbor integrated swagger UI from 1.8. That means all apis can be invoked through UI. Normally, user have 2 ways to navigate to API Explorer. - -1. User can login harbor, and click the "API EXPLORER" button.All apis will be invoked with current user authorization. -![navigation bar](img/api_explorer_btn.png) - - -2. User can navigate to swagger page by ip address by router "devcenter". For example: https://10.192.111.118/devcenter. After go to the page, need to click "authorize" button to give basic authentication to all apis. All apis will be invoked with the authorized user authorization. -![authentication](img/authorize.png) - - +# User Guide +## Overview +This guide walks you through the fundamentals of using Harbor. 
You'll learn how to use Harbor to: + +* [Manage your projects](#managing-projects) +* [Manage members of a project](#managing-members-of-a-project) +* [Replicate resources between Harbor and non-Harbor registries](#replicating-resources) +* [Retag images within Harbor](#retag-images) +* [Search projects and repositories](#searching-projects-and-repositories) +* [Manage labels](#managing-labels) +* [Configure CVE Whitelists](#configure-cve-whitelists) +* [Set Project Quotas](#set-project-quotas) +* [Manage Harbor system if you are the system administrator:](#administrator-options) + * [Manage users](#managing-user) + * [Manage registries](#managing-registry) + * [Manage replication rules](#managing-replication) + * [Manage authentication](#managing-authentication) + * [Manage project creation](#managing-project-creation) + * [Manage self-registration](#managing-self-registration) + * [Manage email settings](#managing-email-settings) + * [Manage registry read only](#managing-registry-read-only) + * [Manage role by LDAP group](#managing-role-by-ldap-group) +* [Pull and push images using Docker client](#pulling-and-pushing-images-using-docker-client) +* [Add description to repositories](#add-description-to-repositories) +* [Delete repositories and images](#deleting-repositories) +* [Content trust](#content-trust) +* [Vulnerability scanning via Clair](#vulnerability-scanning-via-clair) +* [Pull image from Harbor in Kubernetes](#pull-image-from-harbor-in-kubernetes) +* [Manage Helm Charts](#manage-helm-charts) + * [Manage Helm Charts via portal](#manage-helm-charts-via-portal) + * [Working with Helm CLI](#working-with-helm-cli) +* [Online Garbage Collection](#online-garbage-collection) +* [View build history](#build-history) +* [Using CLI after login via OIDC based SSO](#using-oidc-cli-secret) +* [Manage robot account of a project](#robot-account) +* [Tag Retention Rules](#tag-retention-rules) +* [Webhook Notifications](#webhook-notifications) +* [Using API 
Explorer](#api-explorer) + +## Role Based Access Control(RBAC) + +![rbac](img/rbac.png) + +Harbor manages images through projects. Users can be added into one project as a member with one of four different roles: + +* **Guest**: Guest has read-only privilege for a specified project. +* **Developer**: Developer has read and write privileges for a project. +* **Master**: Master has elevated permissions beyond those of 'Developer' including the ability to scan images, view replications jobs, and delete images and helm charts. +* **ProjectAdmin**: When creating a new project, you will be assigned the "ProjectAdmin" role to the project. Besides read-write privileges, the "ProjectAdmin" also has some management privileges, such as adding and removing members, starting a vulnerability scan. + +Besides the above four roles, there are two system-level roles: + +* **SysAdmin**: "SysAdmin" has the most privileges. In addition to the privileges mentioned above, "SysAdmin" can also list all projects, set an ordinary user as administrator, delete users and set vulnerability scan policy for all images. The public project "library" is also owned by the administrator. +* **Anonymous**: When a user is not logged in, the user is considered as an "Anonymous" user. An anonymous user has no access to private projects and has read-only access to public projects. + +See detailed permissions matrix listed here: https://github.com/goharbor/harbor/blob/master/docs/permissions.md + +## User account +Harbor supports different authentication modes: + +* **Database(db_auth)** + + Users are stored in the local database. + + A user can register himself/herself in Harbor in this mode. To disable user self-registration, refer to the [installation guide](installation_guide.md) for initial configuration, or disable this feature in [Administrator Options](#administrator-options). When self-registration is disabled, the system administrator can add users into Harbor. 
+ + When registering or adding a new user, the username and email must be unique in the Harbor system. The password must contain at least 8 characters with 1 lowercase letter, 1 uppercase letter and 1 numeric character. + + When you forget your password, you can follow the below steps to reset the password: + + 1. Click the link "Forgot Password" in the sign in page. + 2. Input the email address entered when you signed up, an email will be sent out to you for password reset. + 3. After receiving the email, click on the link in the email which directs you to a password reset web page. + 4. Input your new password and click "Save". + +* **LDAP/Active Directory (ldap_auth)** + + Under this authentication mode, users whose credentials are stored in an external LDAP or AD server can log in to Harbor directly. + + When an LDAP/AD user logs in by *username* and *password*, Harbor binds to the LDAP/AD server with the **"LDAP Search DN"** and **"LDAP Search Password"** described in [installation guide](installation_guide.md). If it succeeds, Harbor looks up the user under the LDAP entry **"LDAP Base DN"** including subtree. The attribute (such as uid, cn) specified by **"LDAP UID"** is used to match a user with the *username*. If a match is found, the user's *password* is verified by a bind request to the LDAP/AD server. Uncheck **"LDAP Verify Cert"** if the LDAP/AD server uses a self-signed or an untrusted certificate. + + Self-registration, deleting user, changing password and resetting password are not supported under LDAP/AD authentication mode because the users are managed by LDAP or AD. + +* **OIDC Provider (oidc_auth)** + + With this authentication mode, regular user will login to Harbor Portal via SSO flow. + After the system administrator configures Harbor to authenticate via OIDC (more details refer to [this section](#managing-authentication)), + a button `LOGIN VIA OIDC PROVIDER` will appear on the login page. 
+ + ![oidc_login](img/oidc_login.png) + + By clicking this button user will kick off the SSO flow and be redirected to the OIDC Provider for authentication. After a successful + authentication at the remote site, user will be redirected to Harbor. There will be an "onboard" step if it's the first time the user + authenticates using his account, in which there will be a dialog popped up for him to set his user name in Harbor: + ![oidc_onboard](img/oidc_onboard_dlg.png) + + This user name will be the identifier for this user in Harbor, which will be used in the cases such as adding member to a project, assigning roles, etc. + This has to be a unique user name, if another user has used this user name to onboard, user will be prompted to choose another one. + + Regarding this user to use docker CLI, please refer to [Using CLI after login via OIDC based SSO](#using-oidc-cli-secret) + + **NOTE:** + 1. After the onboard process, you still have to login to Harbor via SSO flow, the `Username` and `Password` fields are only for + local admin to login when Harbor is configured authentication via OIDC. + 2. Similar to LDAP authentication mode, self-registration, updating profile, deleting user, changing password and + resetting password are not supported. + + +## Managing projects +A project in Harbor contains all repositories of an application. No images can be pushed to Harbor before the project is created. RBAC is applied to a project. There are two types of projects in Harbor: + +* **Public**: All users have the read privilege to a public project, it's convenient for you to share some repositories with others in this way. +* **Private**: A private project can only be accessed by users with proper privileges. + +You can create a project after you signed in. Checking the "Access Level" checkbox will make this project public. 
+ +![create project](img/new_create_project.png) + +After the project is created, you can browse repositories, members, logs, replication and configuration using the navigation tab. + +![browse project](img/new_browse_project.png) + +There are two views to show repositories, list view and card view, you can switch between them by clicking the corresponding icon. + +![browse repositories](img/browse_project_repositories.png) + +All logs can be listed by clicking "Logs". You can apply a filter by username, or operations and dates under "Advanced Search". + +![browse project](img/log_search_advanced.png) + +![browse project](img/new_project_log.png) + +Project properties can be changed by clicking "Configuration". + +* To make all repositories under the project accessible to everyone, select the `Public` checkbox. + +* To prevent un-signed images under the project from being pulled, select the `Enable content trust` checkbox. + +* To prevent vulnerable images under the project from being pulled, select the `Prevent vulnerable images from running` checkbox and change the severity level of vulnerabilities. Images cannot be pulled if their level equals to or higher than the currently selected level. + +* To activate an immediate vulnerability scan on new images that are pushed to the project, select the `Automatically scan images on push` checkbox. + +![browse project](img/project_configuration.png) + +## Managing members of a project +### Adding members +You can add members with different roles to an existing project. You can add a LDAP/AD user to project members under LDAP/AD authentication mode. + +![browse project](img/new_add_member.png) + +### Updating and removing members +You can check one or more members, then click `ACTION`, choose one role to batch switch checked members' roles or remove them from the project. 
+ +![browse project](img/new_remove_update_member.png) + +## Replicating resources +Replication allows users to replicate resources (images/charts) between Harbor and non-Harbor registries in both pull and push modes. + +Once the system administrator has set a rule, all resources that match the defined [filter](#resource-filter) patterns will be replicated to the destination registry when the [triggering condition](#trigger-mode) is matched. Each resource will start a task to run. If the namespace does not exist on the destination registry, a new namespace will be created automatically. If it already exists and the user configured in the policy has no write privilege to it, the process will fail. The member information will not be replicated. + +There may be a bit of delay during replication based on the situation of the network. If a replication task fails, it will be re-scheduled a few minutes later and retried several times. + +**Note:** Due to API changes, replication between different versions of Harbor is not supported. + +### Creating replication endpoints + +To replicate image repositories from one instance of Harbor to another Harbor or non-Harbor registry, you first create replication endpoints. + +1. Go to **Registries** and click the **+ New Endpoint** button. + + ![New replication endpoint](img/replication-endpoint1.png) +1. For **Provider**, use the drop-down menu to select the type of registry to set up as a replication endpoint. + + The endpoint can be another Harbor instance, or a non-Harbor registry. Currently, the following non-Harbor registries are supported: + + - Docker Hub + - Docker registry + - AWS Elastic Container Registry + - Azure Container Registry + - Ali Cloud Container Registry + - Google Container Registry + - Huawei SWR + - Helm Hub + + ![Replication providers](img/replication-endpoint2.png) + +1. Enter a suitable name and description for the new replication endpoint. +1. 
Enter the full URL of the registry to set up as a replication endpoint. + + For example, to replicate to another Harbor instance, enter https://harbor_instance_address:443. The registry must exist and be running before you create the endpoint. +1. Enter the Access ID and Access Secret for the endpoint registry instance. + + Use an account that has the appropriate privileges on that registry, or an account that has write permission on the corresponding project in a Harbor registry. + + **NOTES**: + - AWS ECR adapters should use access keys, not a username and password. The access key should have sufficient permissions, such as storage permission. + - Google GCR adapters should use the entire JSON key generated in the service account. The namespace should start with the project ID. +1. Optionally, select the **Verify Remote Cert** check box. + + Deselect the check box if the remote registry uses a self-signed or untrusted certificate. +1. Click **Test Connection**. +1. When you have successfully tested the connection, click **OK**. + +### Creating a replication rule +Login as a system administrator user, click `NEW REPLICATION RULE` under `Administration->Replications` and fill in the necessary fields. You can choose different replication modes, [resource filters](#resource-filter) and [trigger modes](#trigger-mode) according to the different requirements. If there is no endpoint available in the list, follow the instructions in the [Creating replication endpoints](#creating-replication-endpoints) to create one. Click `SAVE` to create a replication rule. + +![browse project](img/create_rule.png) + +#### Resource filter +Three resource filters are supported: +* **Name**: Filter resources according to the name. +* **Tag**: Filter resources according to the tag. +* **Resource**: Filter images according to the resource type. 
+ +The terms supported in the pattern used by name filter and tag filter are as follows: +* **\***: Matches any sequence of non-separator characters `/`. +* **\*\***: Matches any sequence of characters, including path separators `/`. +* **?**: Matches any single non-separator character `/`. +* **{alt1,...}**: Matches a sequence of characters if one of the comma-separated alternatives matches. + +**Note:** `library` must be added if you want to replicate the official images of Docker Hub. For example, `library/hello-world` matches the official hello-world images. + +Pattern | String(Match or not) +---------- | ------- +`library/*` | `library/hello-world`(Y)
`library/my/hello-world`(N) +`library/**` | `library/hello-world`(Y)
`library/my/hello-world`(Y) +`{library,goharbor}/**` | `library/hello-world`(Y)
`goharbor/harbor-core`(Y)
`google/hello-world`(N) +`1.?` | `1.0`(Y)
`1.01`(N) + +#### Trigger mode +* **Manual**: Replicate the resources manually when needed. **Note**: The deletion operations are not replicated. +* **Scheduled**: Replicate the resources periodically. **Note**: The deletion operations are not replicated. +* **Event Based**: When a new resource is pushed to the project, it is replicated to the remote registry immediately. Same to the deletion operation if the `Delete remote resources when locally deleted` checkbox is selected. + +### Starting a replication manually +Select a replication rule and click `REPLICATE`, the resources which the rule is applied to will be replicated from the source registry to the destination immediately. + +![browse project](img/start_replicate.png) + +### Listing and stopping replication executions +Click a rule, the execution records which belong to this rule will be listed. Each record represents the summary of one execution of the rule. Click `STOP` to stop the executions which are in progress. + +![browse project](img/list_stop_executions.png) + +### Listing tasks +Click the ID of one execution, you can get the execution summary and the task list. Click the log icon can get the detail information for the replication progress. +**Note**: The count of `IN PROGRESS` status in the summary includes both `Pending` and `In Progress` tasks. + +![browse project](img/list_tasks.png) + +### Deleting the replication rule +Select the replication rule and click `DELETE` to delete it. Only rules which have no in progress executions can be deleted. + +![browse project](img/delete_rule.png) + +## Retag Images + +Images retag helps users to tag images in Harbor, images can be tagged to different repositories and projects, as long as the users have sufficient permissions. 
For example, + +``` +release/app:stg --> release/app:prd +develop/app:v1.0 --> release/app:v1.0 +``` +To retag an image, users should have read permission (guest role or above) to the source project and write permission (developer role or above) to the target project. + +In Harbor portal, select the image you'd like to retag, and click the enabled `Retag` button to open the retag dialog. + +![retag image](img/retag_image.png) + +In the retag dialog, project name, repository name and the new tag should be specified. On click the `CONFIRM` button, the new tag would be created instantly. You can check the new tag in the corresponding project. + +## Searching projects and repositories +Entering a keyword in the search field at the top lists all matching projects and repositories. The search result includes both public and private repositories you have access to. + +![browse project](img/new_search.png) + +## Managing labels +Harbor provides two kinds of labels to isolate kinds of resources(only images for now): +* **Global Level Label**: Managed by system administrators and used to manage the images of the whole system. They can be added to images under any projects. +* **Project Level Label**: Managed by project administrators under a project and can only be added to the images of the project. 
+ +### Managing global level labels +The system administrators can list, create, update and delete the global level labels under `Administration->Configuration->Labels`: + +![manage global level labels](img/manage_global_level_labels.png) + +### Managing project level labels +The project administrators and system administrators can list, create, update and delete the project level labels under `Labels` tab of the project detail page: + +![manage project level labels](img/manage_project_level_labels.png) + +### Adding labels to/remove labels from images +Users who have system administrator, project administrator or project developer role can click the `ADD LABELS` button to add labels to or remove labels from images. The label list contains both globel level labels(come first) and project level labels: + +![add labels to images](img/add_labels_to_images.png) + +### Filtering images by labels +The images can be filtered by labels: + +![filter images by labels](img/filter_images_by_label.png) + +## Configure CVE Whitelists + +When you run vulnerability scans, images that are subject to Common Vulnerabilities and Exposures (CVE) are identified. According to the severity of the CVE and your security settings, these images might not be permitted to run. As a system administrator, you can create whitelists of CVEs to ignore during vulnerability scanning. + +You can set a system-wide CVE whitelist or you can set CVE whitelists on a per-project basis. + +### Configure a System-Wide CVE Whitelist + +System-wide CVE whitelists apply to all of the projects in a Harbor instance. + +1. Go to **Configuration** > **System Settings**. +1. Under **Deployment security**, click **Add**. + ![System-wide CVE whitelist](img/cve-whitelist1.png) +1. Enter the list of CVE IDs to ignore during vulnerability scanning. + ![Add system CVE whitelist](img/cve-whitelist2.png) + + Either use a comma-separated list or newlines to add multiple CVE IDs to the list. +1. 
Click **Add** at the bottom of the window to add the list. +1. Optionally uncheck the **Never expires** checkbox and use the calendar selector to set an expiry date for the whitelist. + ![Add system CVEs](img/cve-whitelist3.png) +1. Click **Save** at the bottom of the page to save your settings. + +After you have created a system whitelist, you can remove CVE IDs from the list by clicking the delete button next to it in the list. You can click **Add** to add more CVE IDs to the system whitelist. + +![Add and remove system CVEs](img/cve-whitelist4.png) + +### Configure a Per-Project CVE Whitelist + +By default, the system whitelist is applied to all projects. You can configure different CVE whitelists for individual projects, that override the system whitelist. + +1. Go to **Projects**, select a project, and select **Configuration**. +1. Under **CVE whitelist**, select **Project whitelist**. + ![Project CVE whitelist](img/cve-whitelist5.png) +1. Optionally click **Copy From System** to add all of the CVE IDs from the system CVE whitelist to this project whitelist. +1. Click **Add** and enter a list of additional CVE IDs to ignore during vulnerability scanning of this project. + ![Add project CVEs](img/cve-whitelist6.png) + + Either use a comma-separated list or newlines to add multiple CVE IDs to the list. +1. Click **Add** at the bottom of the window to add the CVEs to the project whitelist. +1. Optionally uncheck the **Never expires** checkbox and use the calendar selector to set an expiry date for the whitelist. +1. Click **Save** at the bottom of the page to save your settings. + +After you have created a project whitelist, you can remove CVE IDs from the list by clicking the delete button next to it in the list. You can click **Add** at any time to add more CVE IDs to the whitelist for this project. 
+ +If CVEs are added to the system whitelist after you have created a project whitelist, click **Copy From System** to add the new entries from the system whitelist to the project whitelist. + +**NOTE**: If CVEs are deleted from the system whitelist after you have created a project whitelist, and if you added the system whitelist to the project whitelist, you must manually remove the deleted CVEs from the project whitelist. If you click **Copy From System** after CVEs have been deleted from the system whitelist, the deleted CVEs are not automatically removed from the project whitelist. + +## Set Project Quotas + +To exercise control over resource use, as a system administrator you can set quotas on projects. You can limit the number of tags that a project can contain and limit the amount of storage capacity that a project can consume. You can set default quotas that apply to all projects globally. + +**NOTE**: Default quotas apply to projects that are created after you set or change the default quota. The default quota is not applied to projects that already existed before you set it. + +You can also set quotas on individual projects. If you set a global default quota and you set different quotas on individual projects, the per-project quotas are applied. + +By default, all projects have unlimited quotas for both tags and storage use. + +1. Go to **Configuration** > **Project Quotas**. + ![Project quotas](img/project-quota1.png) +1. To set global default quotas on all projects, click **Edit**. + ![Project quotas](img/project-quota2.png) + 1. For **Default artifact count**, enter the maximum number of tags that any project can contain. + + Enter `-1` to set the default to unlimited. + 1. For **Default storage consumption**, enter the maximum quantity of storage that any project can consume, selecting `MB`, `GB`, or `TB` from the drop-down menu. + + Enter `-1` to set the default to unlimited. + ![Project quotas](img/project-quota3.png) + 1. Click **OK**. +1. 
To set quotas on an individual project, click the 3 vertical dots next to a project and select **Edit**. + ![Project quotas](img/project-quota4.png) + 1. For **Default artifact count**, enter the maximum number of tags that this individual project can contain, or enter `-1` to set the default to unlimited. + 1. For **Default storage consumption**, enter the maximum quantity of storage that this individual project can consume, selecting `MB`, `GB`, or `TB` from the drop-down menu. + +After you set quotas, the you can see how much of their quotas each project has consumed in the **Project Quotas** tab. + +![Project quotas](img/project-quota5.png) + +### How Harbor Calculates Resource Usage + +When setting project quotas, it is useful to know how Harbor calculates tag numbers and storage use, especially in relation to image pushing, retagging, and garbage collection. + +- Harbor computes image size when blobs and manifests are pushed from the Docker client. +- Harbor computes tag counts when manifests are pushed from the Docker client. + + **NOTE**: When users push an image, the manifest is pushed last, after all of the associated blobs have been pushed successfully to the registry. If several images are pushed concurrently and if there is an insufficient number of tags left in the quota for all of them, images are accepted in the order that their manifests arrive. Consequently, an attempt to push an image might not be immediately rejected for exceeding the quota. This is because there was availability in the tag quota when the push was initiated, but by the time the manifest arrived the quota had been exhausted. +- Shared blobs are only computed once per project. In Docker, blob sharing is defined globally. In Harbor, blob sharing is defined at the project level. As a consequence, overall storage usage can be greater than the actual disk capacity. 
+- Retagging images reserves and releases resources: + - If you retag an image within a project, the tag count increases by one, but storage usage does not change because there are no new blobs or manifests. + - If you retag an image from one project to another, the tag count and storage usage both increase. +- During garbage collection, Harbor frees the storage used by untagged blobs in the project. +- If the tag count reaches the limit, image blobs can be pushed into a project and storage usage is updated accordingly. You can consider these blobs to be untagged blobs. They can be removed by garbage collection, and the storage that they consume is returned after garbage colletion. +- Helm chart size is not calculated. Only tag counts are calculated. + +## Administrator options +### Managing user +Administrator can add "Administrator" role to one or more ordinary users by checking checkboxes and clicking `SET AS ADMINISTRATOR`. To delete users, checked checkboxes and select `DELETE`. Deleting user is only supported under database authentication mode. + +![browse project](img/new_set_admin_remove_user.png) + +### Managing registry +You can list, add, edit and delete registries under `Administration->Registries`. Only registries which are not referenced by any rules can be deleted. + +![browse project](img/manage_registry.png) + +### Managing replication +You can list, add, edit and delete rules under `Administration->Replications`. + +![browse project](img/manage_replication.png) + +### Managing authentication +You can change authentication mode between **Database**(default) and **LDAP** before any user is added, when there is at least one user(besides admin) in Harbor, you cannot change the authentication mode. +![browse project](img/new_auth.png) +When using LDAP mode, user's self-registration is disabled. The parameters of LDAP server must be filled in. For more information, refer to [User account](#user-account). 
+![browse project](img/ldap_auth.png) + +When using OIDC mode, user will login Harbor via OIDC based SSO. A client has to be registered on the OIDC provider and Harbor's callback URI needs to be associated to that client as a redirectURI. +![OIDC settings](img/oidc_auth_setting.png) + +The settings of this auth mode: +* OIDC Provider Name: The name of the OIDC Provider. +* OIDC Provider Endpoint: The URL of the endpoint of the OIDC provider(a.k.a the Authorization Server in OAuth's terminology), +which must service the "well-known" URI for its configuration, more details please refer to https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest +* OIDC Client ID: The ID of client configured on OIDC Provider. +* OIDC Client Secret: The secret for this client. +* OIDC Scope: The scope values to be used during the authentication. It is the comma separated string, which must contain `openid`. +Normally it should also contain `profile` and `email`. For getting the refresh token it should also contain `offline_access`. Please check with the administrator of the OIDC Provider. +* Verify Certificate: Whether to check the certificate when accessing the OIDC Provider. if you are running the OIDC Provider with self-signed +certificate, make sure this value is set to false. + + +### Managing project creation +Use the **Project Creation** drop-down menu to set which users can create projects. Select **Everyone** to allow all users to create projects. Select **Admin Only** to allow only users with the Administrator role to create projects. +![browse project](img/new_proj_create.png) + +### Managing self-registration +You can manage whether a user can sign up for a new account. This option is not available if you use LDAP authentication. +![browse project](img/new_self_reg.png) + +### Managing email settings +You can change Harbor's email settings, the mail server is used to send out responses to users who request to reset their password. 
+![browse project](img/new_config_email.png) + +### Managing registry read only +You can change Harbor's registry read only settings, read only mode will allow 'docker pull' while preventing 'docker push' and the deletion of repository and tag. +![browse project](img/read_only.png) + +If it set to true, deleting repository, tag and pushing image will be disabled. +![browse project](img/read_only_enable.png) + + +``` +$ docker push 10.117.169.182/demo/ubuntu:14.04 +The push refers to a repository [10.117.169.182/demo/ubuntu] +0271b8eebde3: Preparing +denied: The system is in read only mode. Any modification is prohibited. +``` +### Managing role by LDAP group + +If auth_mode is ldap_auth, you can manage project role by LDAP/AD group. please refer [manage role by ldap group guide](manage_role_by_ldap_group.md). + +## Pulling and pushing images using Docker client + +**NOTE: Harbor only supports Registry V2 API. You need to use Docker client 1.6.0 or higher.** + +Harbor supports HTTP by default and Docker client tries to connect to Harbor using HTTPS first, so if you encounter an error as below when you pull or push images, you need to configure insecure registry. Please, read [this document](https://docs.docker.com/registry/insecure/) in order to understand how to do this. + +```Error response from daemon: Get https://myregistrydomain.com/v1/users/: dial tcp myregistrydomain.com:443 getsockopt: connection refused.``` + +If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add +`--insecure-registry myregistrydomain.com` to the daemon's start up arguments. + +In the case of HTTPS, if you have access to the registry's CA certificate, simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com/ca.crt . 
+ +### Pulling images +If the project that the image belongs to is private, you should sign in first: + +```sh +$ docker login 10.117.169.182 +``` + +You can now pull the image: + +```sh +$ docker pull 10.117.169.182/library/ubuntu:14.04 +``` + +**Note: Replace "10.117.169.182" with the IP address or domain name of your Harbor node. You cannot pull a unsigned image if you enabled content trust.** + +### Pushing images +Before pushing an image, you must create a corresponding project on Harbor web UI. + +First, log in from Docker client: + +```sh +$ docker login 10.117.169.182 +``` + +Tag the image: + +```sh +$ docker tag ubuntu:14.04 10.117.169.182/demo/ubuntu:14.04 +``` + +Push the image: + +```sh +$ docker push 10.117.169.182/demo/ubuntu:14.04 +``` + +**Note: Replace "10.117.169.182" with the IP address or domain name of your Harbor node.** + +### Add description to repositories + +After pushing an image, an Information can be added by project admin to describe this repository. + +Go into the repository and select the "Info" tab, and click the "EDIT" button. An textarea will appear and enter description here. Click "SAVE" button to save this information. + +![edit info](img/edit_description.png) + +### Download the harbor certs + +Users can click the "registry certificate" link to download the registry certificate. + +![browse project](img/download_harbor_certs.png) + +### Deleting repositories + +Repository deletion runs in two steps. + +First, delete a repository in Harbor's UI. This is soft deletion. You can delete the entire repository or just a tag of it. After the soft deletion, +the repository is no longer managed in Harbor, however, the files of the repository still remain in Harbor's storage. + +![browse project](img/new_delete_repo.png) +![browse project](img/new_delete_tag.png) + +**CAUTION: If both tag A and tag B refer to the same image, after deleting tag A, B will also get deleted. 
if you enabled content trust, you need to use notary command line tool to delete the tag's signature before you delete an image.** + +Next, delete the actual files of the repository using the [garbage collection](#online-garbage-collection) in Harbor's UI. + +### Content trust +**NOTE: Notary is an optional component, please make sure you have already installed it in your Harbor instance before you go through this section.** +If you want to enable content trust to ensure that images are signed, please set two environment variables in the command line before pushing or pulling any image: +```sh +export DOCKER_CONTENT_TRUST=1 +export DOCKER_CONTENT_TRUST_SERVER=https://10.117.169.182:4443 +``` +If you push the image for the first time, You will be asked to enter the root key passphrase. This will be needed every time you push a new image while the ``DOCKER_CONTENT_TRUST`` flag is set. +The root key is generated at: ``/root/.docker/trust/private/root_keys`` +You will also be asked to enter a new passphrase for the image. This is generated at ``/root/.docker/trust/private/tuf_keys/[registry name] /[imagepath]``. +If you are using a self-signed cert, make sure to copy the CA cert into ```/etc/docker/certs.d/10.117.169.182``` and ```$HOME/.docker/tls/10.117.169.182:4443/```. When an image is signed, it is indicated in the Web UI. +**Note: Replace "10.117.169.182" with the IP address or domain name of your Harbor node. In order to use content trust, HTTPS must be enabled in Harbor.** + + +When an image is signed, it has a tick shown in UI; otherwise, a cross sign(X) is displayed instead. +![browse project](img/content_trust.png) + +### Vulnerability scanning via Clair +**CAUTION: Clair is an optional component, please make sure you have already installed it in your Harbor instance before you go through this section.** + +Static analysis of vulnerabilities is provided through open source project [Clair](https://github.com/coreos/clair). 
You can initiate scanning on a particular image, or on all images in Harbor. Additionally, you can also set a policy to scan all the images at a specified time everyday. + +**Vulnerability metadata** + +Clair depends on the vulnerability metadata to complete the analysis process. After the first initial installation, Clair will automatically start to update the metadata database from different vulnerability repositories. The updating process may take a while based on the data size and network connection. If the database has not been fully populated, there is a warning message at the footer of the repository datagrid view. +![browse project](img/clair_not_ready.png) + +The 'database not fully ready' warning message is also displayed in the **'Vulnerability'** tab of **'Configuration'** section under **'Administration'** for your awareness. +![browse project](img/clair_not_ready2.png) + +Once the database is ready, an overall database updated timestamp will be shown in the **'Vulnerability'** tab of **'Configuration'** section under **'Administration'**. +![browse project](img/clair_ready.png) + +**Scanning an image** + +Enter your project, select the repository. For each tag there will be an 'Vulnerability' column to display vulnerability scanning status and related information. You can select the image and click the "SCAN" button to trigger the vulnerability scan process. +![browse project](img/scan_image.png) +**NOTES: Only the users with 'Project Admin' role have the privilege to launch the analysis process.** + +The analysis process may have the following status that are indicated in the 'Vulnerability' column: +* **Not Scanned:** The tag has never been scanned. +* **Queued:** The scanning task is scheduled but not executed yet. +* **Scanning:** The scanning process is in progress. +* **Error:** The scanning process failed to complete. +* **Complete:** The scanning process was successfully completed. 
+ +For the **'Not Scanned'** and **'Queued'** statuses, a text label with status information is shown. For the **'Scanning'**, a progress bar will be displayed. +If an error occurred, you can click on the **'View Log'** link to view the related logs. +![browse project](img/log_viewer.png) + +If the process was successfully completed, a result bar is created. The width of the different colored sections indicates the percentage of features with vulnerabilities for a particular severity level. +* **Red:** **High** level of vulnerabilities +* **Orange:** **Medium** level of vulnerabilities +* **Yellow:** **Low** level of vulnerabilities +* **Grey:** **Unknown** level of vulnerabilities +* **Green:** **No** vulnerabilities +![browse project](img/bar_chart.png) + +Move the cursor over the bar, a tooltip with summary report will be displayed. Besides showing the total number of features with vulnerabilities and the total number of features in the scanned image tag, the report also lists the counts of features with vulnerabilities of different severity levels. The completion time of the last analysis process is shown at the bottom of the tooltip. +![browse project](img/summary_tooltip.png) + +Click on the tag name link, the detail page will be opened. Besides the information about the tag, all the vulnerabilities found in the last analysis process will be listed with the related information. You can order or filter the list by columns. +![browse project](img/tag_detail.png) + +**NOTES: You can initiate the vulnerability analysis for a tag at anytime you want as long as the status is not 'Queued' or 'Scanning'.** + +**Scanning all images** + +In the **'Vulnerability'** tab of **'Configuration'** section under **'Administration'**, click on the **'SCAN NOW'** button to start the analysis process for all the existing images. + +**NOTES: The scanning process is executed via multiple concurrent asynchronous tasks. 
There is no guarantee on the order of scanning or the returned results.** +![browse project](img/scan_all.png) + +To avoid frequently triggering the resource intensive scanning process, the availability of the button is restricted. It can be only triggered once in a predefined period. The next available time will be displayed besides the button. +![browse project](img/scan_all2.png) + +**Scheduled Scan by Policy** + +You can set policies to control the vulnerability analysis process. Currently, two options are available: +* **None:** No policy is selected. +* **Daily:** Policy is activated daily. It means an analysis job is scheduled to be executed at the specified time everyday. The scheduled job will scan all the images in Harbor. +![browse project](img/scan_policy.png) + +**NOTES: Once the scheduled job is executed, the completion time of scanning all images will be updated accordingly. Please be aware that the completion time of the images may be different because the execution of analysis for each image may be carried out at different time.** + +### Pull image from Harbor in Kubernetes +Kubernetes users can easily deploy pods with images stored in Harbor. The settings are similar to that of another private registry. There are two major issues: + +1. When your Harbor instance is hosting http and the certificate is self signed. You need to modify daemon.json on each work node of your cluster, for details please refer to: https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry +2. If your pod references an image under private project, you need to create a secret with the credentials of user who has permission to pull image from this project, for details refer to: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + +## Manage Helm Charts +[Helm](https://helm.sh) is a package manager for [Kubernetes](https://kubernetes.io). Helm uses a packaging format called [charts](https://docs.helm.sh/developing_charts). 
Since version 1.6.0 Harbor is now a composite cloud-native registry which supports both container image management and Helm charts management. Access to Helm charts in Harbor is controlled by [role-based access controls (RBAC)](https://en.wikipedia.org/wiki/Role-based_access_control) and is restricted by projects. + +### Manage Helm Charts via portal +#### List charts +Click your project to enter the project detail page after successful logging in. The existing helm charts will be listed under the tab `Helm Charts` which is beside the image `Repositories` tab with the following information: +* Name of helm chart +* The status of the chart: Active or Deprecated +* The count of chart versions +* The created time of the chart + +![list charts](img/chartrepo/list_charts.png) + +You can click the icon buttons on the top right to switch views between card view and list view. + +#### Upload new chart +Click the `UPLOAD` button on the top left to open the chart uploading dialog. Choose the uploading chart from your filesystem. Click the `UPLOAD` button to upload it to the chart repository server. + +![upload charts](img/chartrepo/upload_charts.png) + +If the chart is signed, you can choose the corresponding provenance file from your filesystem and Click the `UPLOAD` button to upload them together at once. + +If the chart is successfully uploaded, it will be displayed in the chart list at once. + +#### List chart versions +Clicking the chart name from the chart list will show all the available versions of that chart with the following information: +* the chart version number +* the maintainers of the chart version +* the template engine used (default is gotpl) +* the created timestamp of the chart version + +![list charts versions](img/chartrepo/list_chart_versions.png) + +Obviously, there will be at least 1 version for each of the charts in the top chart list. 
Same with chart list view, you can also click the icon buttons on the top right to switch views between card view and list view. + +Check the checkbox at the 1st column to select the specified chart versions: +* Click the `DELETE` button to delete all the selected chart versions from the chart repository server. Batch operation is supported. +* Click the `DOWNLOAD` button to download the chart artifact file. Batch operation is not supported. +* Click the `UPLOAD` button to upload the new chart version for the current chart + +#### Adding labels to/remove labels from chart versions +Users who have system administrator, project administrator or project developer role can click the `ADD LABELS` button to add labels to or remove labels from chart versions. + +![add labels to chart versions](img/chartrepo/add_labesl_to_chart_versions.png) + + +#### Filtering chart versions by labels +The chart versions can be filtered by labels: + +![filter chart versions by labels](img/chartrepo/filter_chart_versions_by_label.png) + +#### View chart version details +Clicking the chart version number link will open the chart version details view. You can see more details about the specified chart version here. There are three content sections: +* **Summary:** + * readme of the chart + * overall metadata like home, created timestamp and application version + * related helm commands for reference, such as `helm add repo` and `helm install` etc. +![chart details](img/chartrepo/chart_details.png) +* **Dependencies:** + * list all the dependant sun charts with 'name', 'version' and 'repository' fields +![chart dependencies](img/chartrepo/chart_dependencies.png) +* **Values:** + * display the content from `values.yaml` file with highlight code preview + * clicking the icon buttons on the top right to switch the yaml file view to k-v value pair list view +![chart values](img/chartrepo/chart_values.png) + +Clicking the `DOWNLOAD` button on the top right will start the downloading process. 
+ +### Working with Helm CLI +As a helm chart repository, Harbor can work smoothly with Helm CLI. About how to install Helm CLI, please refer [install helm](https://docs.helm.sh/using_helm/#installing-helm). Run command `helm version` to make sure the version of Helm CLI is v2.9.1+. +``` +helm version + +#Client: &version.Version{SemVer:"v2.9.1", GitCommit:"20adb27c7c5868466912eebdf6664e7390ebe710", GitTreeState:"clean"} +#Server: &version.Version{SemVer:"v2.9.1", GitCommit:"20adb27c7c5868466912eebdf6664e7390ebe710", GitTreeState:"clean"} +``` +#### Add harbor to the repository list +Before working, Harbor should be added into the repository list with `helm repo add` command. Two different modes are supported. +* Add Harbor as a unified single index entry point + +With this mode Helm can be made aware of all the charts located in different projects and which are accessible by the currently authenticated user. +``` +helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo +``` +**NOTES:** Providing both ca file and cert files is caused by an issue from helm. + +* Add Harbor project as separate index entry point + +With this mode, helm can only pull charts in the specified project. +``` +helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject +``` + +#### Push charts to the repository server by CLI +As an alternative, you can also upload charts via the CLI. It is not supported by the native helm CLI. A plugin from the community should be installed before pushing. Run `helm plugin install` to install the `push` plugin first. 
+``` +helm plugin install https://github.com/chartmuseum/helm-push +``` +After a successful installation, run `push` command to upload your charts: +``` +helm push --ca-file=ca.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo +``` +**NOTES:** `push` command does not support pushing a prov file of a signed chart yet. + +#### Install charts +Before installing, make sure your helm is correctly initialized with command `helm init` and the chart index is synchronized with command `helm repo update`. + +Search the chart with the keyword if you're not sure where it is: +``` +helm search hello + +#NAME CHART VERSION APP VERSION DESCRIPTION +#local/hello-helm 0.3.10 1.3 A Helm chart for Kubernetes +#myrepo/chart_repo/hello-helm 0.1.10 1.2 A Helm chart for Kubernetes +#myrepo/library/hello-helm 0.3.10 1.3 A Helm chart for Kubernetes +``` +Everything is ready, install the chart to your kubernetes: +``` +helm install --ca-file=ca.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm +``` + +For other more helm commands like how to sign a chart, please refer to the [helm doc](https://docs.helm.sh/helm/#helm). + +## Online Garbage Collection +Online Garbage Collection enables user to trigger docker registry garbage collection by clicking button on UI. + +**NOTES:** The space is not freed when the images are deleted from Harbor, Garbage Collection is the task to free up the space by removing blobs from the filesystem when they are no longer referenced by a manifest. + +For more information about Garbage Collection, please see [Garbage Collection](https://github.com/docker/docker.github.io/blob/master/registry/garbage-collection.md). + +### Setting up Garbage Collection +If you are a system admin, you can trigger garbage collection by clicking "GC Now" in the **'Garbage Collection'** tab of **'Configuration'** section under **'Administration'**. 
+ +![browse project](img/gc_now.png) +**NOTES:** Harbor is put into read-only mode when to execute Garbage Collection, and any modification on docker registry is prohibited. + +To avoid frequently triggering the garbage collection process, the availability of the button is restricted. It can be only triggered once in one minute. +![browse project](img/gc_now2.png) + +**Scheduled Garbage Collection by Policy** +* **None:** No policy is selected. +* **Daily:** Policy is activated daily. It means an analysis job is scheduled to be executed at the specified time everyday. The scheduled job will do garbage collection in Harbor. +* **Weekly:** Policy is activated weekly. It means an analysis job is scheduled to be executed at the specified time every week. The scheduled job will do garbage collection in Harbor. +Once the policy has been configured, you have the option to save the schedule. +![browse project](img/gc_policy.png) + +### Garbage Collection history +If you are a system admin, you can view the latest 10 records of garbage collection execution. +![browse project](img/gc_history.png) + +You can click on the 'details' link to view the related logs. +![browse project](img/gc_details.png) + +## Build history + +Build history make it easy to see the contents of a container image, find the code which bulids an image, or locate the image for a source repository. + +In Harbor portal, enter your project, select the repository, click on the link of tag name you'd like to see its build history, the detail page will be opened. Then switch to `Build History` tab, you can see the build history information. + +![build_ history](img/build_history.png) + +## Using OIDC CLI secret + +Having authenticated via OIDC SSO and onboarded to Harbor, you can use Docker/Helm CLI to access Harbor to read/write the artifacts. +As the CLI cannot handle redirection for SSO, we introduced `CLI secret`, which is only available when Harbor's authentication mode +is configured to OIDC based. 
+After logging into Harbor, click the drop down list to view user's profile:
+![user_profile](img/user_profile.png)
+
+You can copy your CLI secret via the dialog of profile:
+![profile_dlg](img/profile_dlg.png)
+
+After that you can authenticate using your user name in Harbor that you set during onboard process, and CLI secret as the password
+with Docker/Helm CLI, for example:
+```sh
+docker login -u testuser -p xxxxxx jt-test.local.goharbor.io
+
+```
+
+When you click the "..." icon in the profile dialog, a button for generating new CLI secret will appear, and you can generate a new
+CLI secret by clicking this button. Please be reminded one user can only have one CLI secret, so when a new secret is generated, the
+old one becomes invalid at once.
+
+**NOTE**:
+Under the hood the CLI secret is associated with the ID token, and Harbor will try to refresh the token, so the CLI secret will
+be valid after the ID token expires. However, if the OIDC Provider does not provide refresh token or the refresh fails for some
+reason, the CLI secret will become invalid. In that case you can logout and login Harbor via SSO flow again so Harbor can get a
+new ID token and the CLI secret will work again.
+
+
+## Robot Account
+Robot Accounts are accounts created by project admins that are intended for automated operations. They have the following limitations:
+
+1. Robot Accounts cannot login Harbor portal
+2. Robot Accounts can only perform operations by using the Docker and Helm CLIs.
+
+### Add a Robot Account
+If you are a project admin, you can create a Robot Account by clicking "New Robot Account" in the `Robot Accounts` tab of a project, and enter a name, a description, and grant permission to the account to push and pull images and Helm charts.
+![add_robot_account](img/robotaccount/add_robot_account.png) + +![add_robot_account](img/robotaccount/add_robot_account_2.png) + +> **NOTE:** The name will become `robot$` and will be used to distinguish a robot account from a normal harbor user. + +![copy_robot_account_token](img/robotaccount/copy_robot_account_token.png) +As Harbor doesn't store your account token, please make sure to copy it in the pop up dialog after creating, otherwise, there is no way to get it from Harbor. + +### Configure duration of robot account +If you are a system admin, you can configure the robot account token duration in days. +![set_robot_account_token_duration](img/robotaccount/set_robot_account_token_duration.png) + +### Authenticate with a robot account +To authenticate with a Robot Account, use `docker login` as below, + +``` +docker login harbor.io +Username: robot$accountname +Password: Thepasswordgeneratedbyprojectadmin +``` + +### Disable a robot account +If you are a project admin, you can disable a Robot Account by clicking "Disable Account" in the `Robot Accounts` tab of a project. +![disable_robot_account](img/robotaccount/disable_delete_robot_account.png) + +### Delete a robot account +If you are a project admin, you can delete a Robot Account by clicking "Delete" in the `Robot Accounts` tab of a project. +![delete_robot_account](img/robotaccount/disable_delete_robot_account.png) + +## Tag Retention Rules + +A repository can rapidly accumulate a large number of image tags, many of which might not be required after a given time or once they have been superseded by a subsequent image build. These excess tags can obviously consume large quantities of storage capacity. As a system administrator, you can define rules that govern how many tags of a given repository to retain, or for how long to retain certain tags. + +### How Tag Retention Rules Work + +You define tag retention rules on repositories, not on projects. 
This allows for greater granularity when defining your retention rules. As the name suggests, when you define a retention rule for a repository, you are identifying which tags to retain. You do not define rules to explicitly remove tags. Rather, when you set a rule, any tags in a repository that are not identified as being eligible for retention are discarded. + +A tag retention rule has 3 filters that are applied sequentially, as described in the following table. + +|Order|Filter|Description| +|---|---|---| +|First|Repository or repositories|Identify the repository or repositories on which to apply the rule. You can identify repositories that either have a certain name or name fragment, or that do not have that name or name fragment. Wild cards (for example `*repo`, `repo*`, and `**`) are permitted. The repository filter is applied first to mark the repositories to which to apply the retention rule. The identified repositories are earmarked for further matching based on the tag criteria. No action is taken on the nonspecified repositories at this stage.| +|Second|Quantity to retain|Set which tags to retain either by specifying a maximum number of tags, or by specifying a maximum period for which to retain tags.| +|Third|Tags to retain|Identify the tag or tags on which to apply the rule. You can identify tags that either have a certain name or name fragment, or that do not have that name or name fragment. Wild cards (for example `*tag`, `tag*`, and `**`) are permitted.| + +For information about how the `**` wildcard is applied, see https://github.com/bmatcuk/doublestar#patterns. + +#### Example 1 + +- You have 5 repositories in a project, repositories A to E. + - Repository A has 100 image tags, all of which have been pulled in the last week. + - Repositories B to E each have 6 images, none of which have been pulled in the last month. +- You set the repository filter to `**`, meaning that all repositories in the project are included. 
+- You set the retention policy to retain the 10 most recently pulled images in each repository. +- You set the tag filter to `**`, meaning that all tags in the repository are included. + +In this example the rule retains the 10 most recently pulled images in repository A, and all 6 of the images in each of the 4 repositories B to E. So, a total of 34 image tags are retained in the project. In other words, the rule does not treat all of the images in repositories A to E as a single pool from which to choose the 10 most recent images. So, even if the 11th to 100th tags in repository A have been pulled more recently than any of the tags in repositories B to E, all of the tags in repositories B to E are retained, because each of those repositories has fewer than 10 tags. + +#### Example 2 + +This example uses the same project and repositories as example 1, but sets the retention policy to retain the images in each repository that have been pulled in the last 7 days. + +In this case, all of the images in repository A are retained because they have been pulled in the last 7 days. None of the images in repositories B to E are retained, because none of them has been pulled in the last week. In this example, 100 images are retained, as opposed to 34 images in example 1. + +#### Tag Retention Rules and Native Docker Tag Deletion + +**WARNING**: Due to native Docker tag deletion behavior, there is an issue with the current retention policy implementation. If you have multiple tags that refer to the same SHA digest, and if a subset of these tags are marked for deletion by a configured retention policy, all of the remaining tags would also be deleted. This violates the retention policy, so in this case all of the tags are retained. This issue will be addressed in a future update release, so that tag retention policies can delete tags without deleting the digest and other shared tags. 
+
+For example, you have following tags, listed according to their push time, and all of them refer to the same SHA digest:
+
+- `harbor-1.8`, pushed 8/14/2019 12:00am
+- `harbor-release`, pushed 8/14/2019 03:00am
+- `harbor-nightly`, pushed 8/14/2019 06:00am
+- `harbor-latest`, pushed 8/14/2019 09:00am
+
+You configure a retention policy to retain the two most recently pushed tags that match `harbor-*`, so that `harbor-1.8` and `harbor-release` are marked for deletion. However, since all four tags refer to the same SHA digest, deleting those two tags would also delete the remaining tags `harbor-nightly` and `harbor-latest`, which would violate the retention policy, so in this case all of the tags are retained.
+
+### Combining Rules on a Repository
+
+You can define up to 15 rules per project. You can apply multiple rules to a repository or set of repositories. When you apply multiple rules to a repository, they are applied with `OR` logic rather than with `AND` logic. In this way, there is no prioritization of application of the rules on a given repository. Rules run concurrently in the background, and the resulting sets from each rule are combined at the end of the run.
+
+#### Example 3
+
+This example uses the same project and repositories as examples 1 and 2, but sets two rules:
+
+- Rule 1: Retain all of the images in each repository that have been pulled in the last 7 days.
+- Rule 2: Retain a maximum number of 10 images in each repository.
+
+For repository A, rule 1 retains all of the images because they have all been pulled in the last week. Rule 2 retains the 10 most recently pulled images. So, since the two rules are applied with an `OR` relationship, all 100 images are retained in repository A.
+
+For repositories B-E, rule 1 will retain 0 images as no images are pulled in the last week. Rule 2 will retain all 6 images because 6 < 10. So, since the two rules are applied with an `OR` relationship, for repositories B-E, each repository will keep all 6 images.
+
+In this example, all of the images are retained.
+ +#### Example 4 + +This example uses a different repository to the previous examples. + +- You have a repository that has 12 tags: + + |Production|Release Candidate|Release| + |---|---|---| + |`2.1-your_repo-prod`|`2.1-your_repo-rc`|`2.1-your_repo-release`| + |`2.2-your_repo-prod`|`2.2-your_repo-rc`|`2.2-your_repo-release`| + |`3.1-your_repo-prod`|`3.1-your_repo-rc`|`3.1-your_repo-release`| + |`4.4-your_repo-prod`|`4.4-your_repo-rc`|`4.4-your_repo-release`| + +- You define three tag retention rules on this repository: + - Retain the 10 most recently pushed image tags that start with `2`. + - Retain the 10 most recently pushed image tags that end with `-prod`. + - Retain all tags that do not include `2.1-your_repo-prod`. + +In this example, the rules are applied to the following 7 tags: + +- `2.1-your_repo-rc` +- `2.1-your_repo-release` +- `2.2-your_repo-prod` +- `2.2-your_repo-rc` +- `2.2-your_repo-release` +- `3.1-your_repo-prod` +- `4.4-your_repo-prod` + +### How Tag Retention Rules Interact with Project Quotas + +The system administrator can set a maximum on the number of tags that a project can contain and the amount of storage that it can consume. For information about project quotas, see [Set Project Quotas](#set-project-quotas). + +If you set a quota on a project, this quota cannot be exceeded. The quota is applied to a project even if you set a retention rule that would exceed it. In other words, you cannot use retention rules to bypass quotas. + +### Configure Tag Retention Rules + +1. Select a project and go to the **Tag Retention** tab. + ![Tag Retention option](img/tag-retention1.png) +1. Click **Add Rule** to add a rule. +1. In the **For the repositories** drop-down menu, select **matching** or **excluding**. + ![Select repositories](img/tag-retention2.png) +1. Identify the repositories on which to apply the rule. 
+ + You can define the repositories on which to apply the rule by entering the following information: + + - A repository name, for example `my_repo_1`. + - A comma-separated list of repository names, for example `my_repo_1,my_repo_2,your_repo_3`. + - A partial repository name with wildcards, for example `my_*`, `*_3`, or `*_repo_*`. + - `**` to apply the rule to all of the repositories in the project. + + If you selected **matching**, the rule is applied to the repositories you identified. If you selected **excluding**, the rule is applied to all of the repositories in the project except for the ones that you identified. +1. Define how many tags to retain or how the period to retain tags. + ![Select retention criteria](img/tag-retention3.png) + + |Option|Description| + |---|---| + |**retain the most recently pushed # images**|Enter the maximum number of images to retain, keeping the ones that have been pushed most recently. There is no maximum age for an image.| + |**retain the most recently pulled # images**|Enter the maximum number of images to retain, keeping only the ones that have been pulled recently. There is no maximum age for an image.| + |**retain the images pushed within the last # days**|Enter the number of days to retain images, keeping only the ones that have been pushed during this period. There is no maximum number of images.| + |**retain the images pulled within the last # days**|Enter the number of days to retain images, keeping only the ones that have been pulled during this period. There is no maximum number of images.| + |**retain always**|Always retain the images identified by this rule.| + +1. In the **Tags** drop-down menu, select **matching** or **excluding**. +1. Identify the tags on which to apply the rule. + + You can define the tags on which to apply the rule by entering the following information: + + - A tag name, for example `my_tag_1`. + - A comma-separated list of tag names, for example `my_tag_1,my_tag_2,your_tag_3`. 
+ - A partial tag name with wildcards, for example `my_*`, `*_3`, or `*_tag_*`. + - `**` to apply the rule to all of the tags in the project. + + If you selected **matching**, the rule is applied to the tags you identified. If you selected **excluding**, the rule is applied to all of the tags in the repository except for the ones that you identified. +1. Click **Add** to save the rule. +1. (Optional) Click **Add Rule** to add more rules, up to a maximum of 15 per project. +1. (Optional) Under Schedule, click **Edit** and select how often to run the rule. + ![Select retention criteria](img/tag-retention4.png) + If you select **Custom**, enter a cron job command to schedule the rule. + + **NOTE**: If you define multiple rules, the schedule is applied to all of the rules. You cannot schedule different rules to run at different times. +1. Click **Dry Run** to test the rule or rules that you have defined. +1. Click **Run Now** to run the rule immediately. + +**WARNING**: You cannot revert a rule after you run it. It is strongly recommended to perform a dry run before you run rules. + +To modify an existing rule, click the three vertical dots next to a rule to disable, edit, or delete that rule. + +![Modify tag retention rules](img/tag-retention5.png) + +## Webhook Notifications + +If you are a project administrator, you can configure a connection from a project in Harbor to a webhook endpoint. If you configure webhooks, Harbor notifies the webhook endpoint of certain events that occur in the project. Webhooks allow you to integrate Harbor with other tools to streamline continuous integration and development processes. + +The action that is taken upon receiving a notification from a Harbor project depends on your continuous integration and development processes. 
For example, by configuring Harbor to send a `POST` request to a webhook listener at an endpoint of your choice, you can trigger a build and deployment of an application whenever there is a change to an image in the repository. + +### Supported Events + +You can define one webhook endpoint per project. Webhook notifications provide information about events in JSON format and are delivered by `HTTP` or `HTTPS POST` to an existing webhhook endpoint URL that you provide. The following table describes the events that trigger notifications and the contents of each notification. + +|Event|Webhook Event Type|Contents of Notification| +|---|---|---| +|Push image to registry|`IMAGE PUSH`|Repository namespace name, repository name, resource URL, tags, manifest digest, image name, push time timestamp, username of user who pushed image| +|Pull manifest from registry|`IMAGE PULL`|Repository namespace name, repository name, manifest digest, image name, pull time timestamp, username of user who pulled image| +|Delete manifest from registry|`IMAGE DELETE`|Repository namespace name, repository name, manifest digest, image name, image size, delete time timestamp, username of user who deleted image| +|Upload Helm chart to registry|`CHART PUSH`|Repository name, chart name, chart type, chart version, chart size, tag, timestamp of push, username of user who uploaded chart| +|Download Helm chart from registry|`CHART PULL`|Repository name, chart name, chart type, chart version, chart size, tag, timestamp of push, username of user who pulled chart| +|Delete Helm chart from registry|`CHART DELETE`|Repository name, chart name, chart type, chart version, chart size, tag, timestamp of delete, username of user who deleted chart| +|Image scan completed|`IMAGE SCAN COMPLETED`|Repository namespace name, repository name, tag scanned, image name, number of critical issues, number of major issues, number of minor issues, last scan status, scan completion time timestamp, vulnerability information (CVE 
ID, description, link to CVE, criticality, URL for any fix), username of user who performed scan| +|Image scan failed|`IMAGE SCAN FAILED`|Repository namespace name, repository name, tag scanned, image name, error that occurred, username of user who performed scan| + +#### JSON Payload Format + +The webhook notification is delivered in JSON format. The following example shows the JSON notification for a push image event: + +``` +{ + "event_type": "pushImage" + "events": [ + { + "project": "prj", + "repo_name": "repo1", + "tag": "latest", + "full_name": "prj/repo1", + "trigger_time": 158322233213, + "image_id": "9e2c9d5f44efbb6ee83aecd17a120c513047d289d142ec5738c9f02f9b24ad07", + "project_type": "Private" + } + ] +} +``` + +### Webhook Endpoint Recommendations + +The endpoint that receives the webhook should ideally have a webhook listener that is capable of interpreting the payload and acting upon the information it contains. For example, running a shell script. + +### Example Use Cases + +You can configure your continuous integration and development infrastructure so that it performs the following types of operations when it receives a webhook notification from Harbor. + +- Image push: + - Trigger a new build immediately following a push on selected repositories or tags. + - Notify services or applications that use the image that a new image is available and pull it. + - Scan the image using Clair. + - Replicate the image to remote registries. +- Image scanning: + - If a vulnerability is found, rescan the image or replicate it to another registry. + - If the scan passes, deploy the image. + +### Configure Webhooks + +1. Select a project and go to the Webhooks tab. + ![Webhooks option](img/webhooks1.png) +1. Enter the URL for your webhook endpoint listener. + ![Webhook URL](img/webhooks2.png) +1. If your webhook listener implements authentication, enter the authentication header. +1. 
To implement `HTTPS POST` instead of `HTTP POST`, select the **Verify Remote Certificate** check box.
+1. Click **Test Endpoint** to make sure that Harbor can connect to the listener.
+1. Click **Continue** to create the webhook.
+
+When you have created the webhook, you see the status of the different notifications and the timestamp of the last time each notification was triggered. You can click **Disable** to disable notifications.
+
+![Webhook Status](img/webhooks3.png)
+
+**NOTE**: You can only disable and reenable all notifications. You cannot disable and enable selected notifications.
+
+If a webhook notification fails to send, or if it receives an HTTP error response with a code other than `2xx`, the notification is re-sent based on the configuration that you set in `harbor.yml`.
+
+### Globally Enable and Disable Webhooks
+
+As a system administrator, you can enable and disable webhook notifications for all projects.
+
+1. Go to **Configuration** > **System Settings**.
+1. Scroll down and check or uncheck the **Webhooks enabled** check box.
+
+   ![Enable/disable webhooks](img/webhooks4.png)
+
+## API Explorer
+
+Harbor has integrated the Swagger UI since version 1.8. This means that all APIs can be invoked through the UI. Normally, users have 2 ways to navigate to the API Explorer.
+
+1. Users can log in to Harbor and click the "API EXPLORER" button. All APIs will be invoked with the current user's authorization.
+![navigation bar](img/api_explorer_btn.png)
+
+
+2. Users can navigate to the Swagger page via the "devcenter" route using the IP address. For example: https://10.192.111.118/devcenter. After going to the page, click the "authorize" button to provide basic authentication for all APIs. All APIs will then be invoked with the authorized user's authorization.
+![authentication](img/authorize.png) + + diff --git a/make/checkenv.sh b/make/checkenv.sh index 4c82c5522..df143477d 100755 --- a/make/checkenv.sh +++ b/make/checkenv.sh @@ -1,50 +1,6 @@ -#/bin/bash +#!/bin/bash -#docker version: 1.11.2 -#docker-compose version: 1.7.1 -#Harbor version: 0.4.5+ -set +e set -o noglob - -# -# Set Colors -# - -bold=$(tput bold) -underline=$(tput sgr 0 1) -reset=$(tput sgr0) - -red=$(tput setaf 1) -green=$(tput setaf 76) -white=$(tput setaf 7) -tan=$(tput setaf 202) -blue=$(tput setaf 25) - -# -# Headers and Logging -# - -underline() { printf "${underline}${bold}%s${reset}\n" "$@" -} -h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@" -} -h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@" -} -debug() { printf "${white}%s${reset}\n" "$@" -} -info() { printf "${white}➜ %s${reset}\n" "$@" -} -success() { printf "${green}✔ %s${reset}\n" "$@" -} -error() { printf "${red}✖ %s${reset}\n" "$@" -} -warn() { printf "${tan}➜ %s${reset}\n" "$@" -} -bold() { printf "${bold}%s${reset}\n" "$@" -} -note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@" -} - set -e usage=$'Checking environment for harbor build and install. Include golang, docker and docker-compose.' @@ -61,89 +17,8 @@ while [ $# -gt 0 ]; do shift || true done -function check_golang { - if ! go version &> /dev/null - then - warn "No golang package in your enviroment. You should use golang docker image build binary." - return - fi - - # docker has been installed and check its version - if [[ $(go version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]] - then - golang_version=${BASH_REMATCH[1]} - golang_version_part1=${BASH_REMATCH[2]} - golang_version_part2=${BASH_REMATCH[3]} - - # the version of golang does not meet the requirement - if [ "$golang_version_part1" -lt 1 ] || ([ "$golang_version_part1" -eq 1 ] && [ "$golang_version_part2" -lt 6 ]) - then - warn "Better to upgrade golang package to 1.6.0+ or use golang docker image build binary." 
- return - else - note "golang version: $golang_version" - fi - else - warn "Failed to parse golang version." - return - fi -} - -function check_docker { - if ! docker --version &> /dev/null - then - error "Need to install docker(1.10.0+) first and run this script again." - exit 1 - fi - - # docker has been installed and check its version - if [[ $(docker --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]] - then - docker_version=${BASH_REMATCH[1]} - docker_version_part1=${BASH_REMATCH[2]} - docker_version_part2=${BASH_REMATCH[3]} - - # the version of docker does not meet the requirement - if [ "$docker_version_part1" -lt 1 ] || ([ "$docker_version_part1" -eq 1 ] && [ "$docker_version_part2" -lt 10 ]) - then - error "Need to upgrade docker package to 1.10.0+." - exit 1 - else - note "docker version: $docker_version" - fi - else - error "Failed to parse docker version." - exit 1 - fi -} - -function check_dockercompose { - if ! docker-compose --version &> /dev/null - then - error "Need to install docker-compose(1.7.1+) by yourself first and run this script again." - exit 1 - fi - - # docker-compose has been installed, check its version - if [[ $(docker-compose --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]] - then - docker_compose_version=${BASH_REMATCH[1]} - docker_compose_version_part1=${BASH_REMATCH[2]} - docker_compose_version_part2=${BASH_REMATCH[3]} - - # the version of docker-compose does not meet the requirement - if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 6 ]) - then - error "Need to upgrade docker-compose package to 1.7.1+." - exit 1 - else - note "docker-compose version: $docker_compose_version" - fi - else - error "Failed to parse docker-compose version." 
- exit 1 - fi -} +DIR="$(cd "$(dirname "$0")" && pwd)" +source $DIR/common.sh check_golang check_docker diff --git a/make/common.sh b/make/common.sh new file mode 100644 index 000000000..0a8a52b88 --- /dev/null +++ b/make/common.sh @@ -0,0 +1,132 @@ +#!/bin/bash +#docker version: 17.06.0+ +#docker-compose version: 1.18.0+ +#golang version: 1.12.0+ + +set +e +set -o noglob + +# +# Set Colors +# + +bold=$(tput bold) +underline=$(tput sgr 0 1) +reset=$(tput sgr0) + +red=$(tput setaf 1) +green=$(tput setaf 76) +white=$(tput setaf 7) +tan=$(tput setaf 202) +blue=$(tput setaf 25) + +# +# Headers and Logging +# + +underline() { printf "${underline}${bold}%s${reset}\n" "$@" +} +h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@" +} +h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@" +} +debug() { printf "${white}%s${reset}\n" "$@" +} +info() { printf "${white}➜ %s${reset}\n" "$@" +} +success() { printf "${green}✔ %s${reset}\n" "$@" +} +error() { printf "${red}✖ %s${reset}\n" "$@" +} +warn() { printf "${tan}➜ %s${reset}\n" "$@" +} +bold() { printf "${bold}%s${reset}\n" "$@" +} +note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@" +} + +set -e + +function check_golang { + if ! go version &> /dev/null + then + warn "No golang package in your enviroment. You should use golang docker image build binary." + return + fi + + # docker has been installed and check its version + if [[ $(go version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]] + then + golang_version=${BASH_REMATCH[1]} + golang_version_part1=${BASH_REMATCH[2]} + golang_version_part2=${BASH_REMATCH[3]} + + # the version of golang does not meet the requirement + if [ "$golang_version_part1" -lt 1 ] || ([ "$golang_version_part1" -eq 1 ] && [ "$golang_version_part2" -lt 12 ]) + then + warn "Better to upgrade golang package to 1.12.0+ or use golang docker image build binary." 
+ return + else + note "golang version: $golang_version" + fi + else + warn "Failed to parse golang version." + return + fi +} + +function check_docker { + if ! docker --version &> /dev/null + then + error "Need to install docker(17.06.0+) first and run this script again." + exit 1 + fi + + # docker has been installed and check its version + if [[ $(docker --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]] + then + docker_version=${BASH_REMATCH[1]} + docker_version_part1=${BASH_REMATCH[2]} + docker_version_part2=${BASH_REMATCH[3]} + + # the version of docker does not meet the requirement + if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ]) + then + error "Need to upgrade docker package to 17.06.0+." + exit 1 + else + note "docker version: $docker_version" + fi + else + error "Failed to parse docker version." + exit 1 + fi +} + +function check_dockercompose { + if ! docker-compose --version &> /dev/null + then + error "Need to install docker-compose(1.18.0+) by yourself first and run this script again." + exit 1 + fi + + # docker-compose has been installed, check its version + if [[ $(docker-compose --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]] + then + docker_compose_version=${BASH_REMATCH[1]} + docker_compose_version_part1=${BASH_REMATCH[2]} + docker_compose_version_part2=${BASH_REMATCH[3]} + + # the version of docker-compose does not meet the requirement + if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 18 ]) + then + error "Need to upgrade docker-compose package to 1.18.0+." + exit 1 + else + note "docker-compose version: $docker_compose_version" + fi + else + error "Failed to parse docker-compose version." 
+ exit 1 + fi +} \ No newline at end of file diff --git a/make/install.sh b/make/install.sh index f8acf0f2d..ee6d88c4a 100755 --- a/make/install.sh +++ b/make/install.sh @@ -1,50 +1,11 @@ #!/bin/bash -set +e -set -o noglob - -# -# Set Colors -# - -bold=$(tput bold) -underline=$(tput sgr 0 1) -reset=$(tput sgr0) - -red=$(tput setaf 1) -green=$(tput setaf 76) -white=$(tput setaf 7) -tan=$(tput setaf 202) -blue=$(tput setaf 25) - -# -# Headers and Logging -# - -underline() { printf "${underline}${bold}%s${reset}\n" "$@" -} -h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@" -} -h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@" -} -debug() { printf "${white}%s${reset}\n" "$@" -} -info() { printf "${white}➜ %s${reset}\n" "$@" -} -success() { printf "${green}✔ %s${reset}\n" "$@" -} -error() { printf "${red}✖ %s${reset}\n" "$@" -} -warn() { printf "${tan}➜ %s${reset}\n" "$@" -} -bold() { printf "${bold}%s${reset}\n" "$@" -} -note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@" -} - set -e set +o noglob +DIR="$(cd "$(dirname "$0")" && pwd)" +source $DIR/common.sh + usage=$'Please set hostname and other necessary attributes in harbor.yml first. DO NOT use localhost or 127.0.0.1 for hostname, because Harbor needs to be accessed by external clients. Please set --with-notary if needs enable Notary in Harbor, and set ui_url_protocol/ssl_cert/ssl_cert_key in harbor.yml bacause notary must run under https. Please set --with-clair if needs enable Clair in Harbor @@ -86,62 +47,6 @@ then exit 1 fi -function check_docker { - if ! docker --version &> /dev/null - then - error "Need to install docker(17.06.0+) first and run this script again." 
- exit 1 - fi - - # docker has been installed and check its version - if [[ $(docker --version) =~ (([0-9]+).([0-9]+).([0-9]+)) ]] - then - docker_version=${BASH_REMATCH[1]} - docker_version_part1=${BASH_REMATCH[2]} - docker_version_part2=${BASH_REMATCH[3]} - - # the version of docker does not meet the requirement - if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ]) - then - error "Need to upgrade docker package to 17.06.0+." - exit 1 - else - note "docker version: $docker_version" - fi - else - error "Failed to parse docker version." - exit 1 - fi -} - -function check_dockercompose { - if ! docker-compose --version &> /dev/null - then - error "Need to install docker-compose(1.18.0+) by yourself first and run this script again." - exit 1 - fi - - # docker-compose has been installed, check its version - if [[ $(docker-compose --version) =~ (([0-9]+).([0-9]+).([0-9]+)) ]] - then - docker_compose_version=${BASH_REMATCH[1]} - docker_compose_version_part1=${BASH_REMATCH[2]} - docker_compose_version_part2=${BASH_REMATCH[3]} - - # the version of docker-compose does not meet the requirement - if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 18 ]) - then - error "Need to upgrade docker-compose package to 1.18.0+." - exit 1 - else - note "docker-compose version: $docker_compose_version" - fi - else - error "Failed to parse docker-compose version." 
- exit 1 - fi -} - h2 "[Step $item]: checking installation environment ..."; let item+=1 check_docker check_dockercompose diff --git a/make/migrations/postgresql/0010_1.9.0_schema.up.sql b/make/migrations/postgresql/0010_1.9.0_schema.up.sql index 5ca2c39f2..767e0e669 100644 --- a/make/migrations/postgresql/0010_1.9.0_schema.up.sql +++ b/make/migrations/postgresql/0010_1.9.0_schema.up.sql @@ -185,4 +185,4 @@ create table notification_policy ( ALTER TABLE replication_task ADD COLUMN status_revision int DEFAULT 0; DELETE FROM project_metadata WHERE deleted = TRUE; -ALTER TABLE project_metadata DROP COLUMN deleted; \ No newline at end of file +ALTER TABLE project_metadata DROP COLUMN deleted; diff --git a/make/migrations/postgresql/0011_1.9.1_schema.up.sql b/make/migrations/postgresql/0011_1.9.1_schema.up.sql new file mode 100644 index 000000000..4943b237d --- /dev/null +++ b/make/migrations/postgresql/0011_1.9.1_schema.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE harbor_user ADD COLUMN password_version varchar(16) Default 'sha256'; +UPDATE harbor_user SET password_version = 'sha1'; \ No newline at end of file diff --git a/make/migrations/postgresql/0015_1.10.0_schema.up.sql b/make/migrations/postgresql/0015_1.10.0_schema.up.sql new file mode 100644 index 000000000..44372ba93 --- /dev/null +++ b/make/migrations/postgresql/0015_1.10.0_schema.up.sql @@ -0,0 +1,47 @@ +/*Table for keeping the plug scanner registration*/ +CREATE TABLE scanner_registration +( + id SERIAL PRIMARY KEY NOT NULL, + uuid VARCHAR(64) UNIQUE NOT NULL, + url VARCHAR(256) UNIQUE NOT NULL, + name VARCHAR(128) UNIQUE NOT NULL, + description VARCHAR(1024) NULL, + auth VARCHAR(16) NOT NULL, + access_cred VARCHAR(512) NULL, + disabled BOOLEAN NOT NULL DEFAULT FALSE, + is_default BOOLEAN NOT NULL DEFAULT FALSE, + skip_cert_verify BOOLEAN NOT NULL DEFAULT FALSE, + create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +/*Table for keeping the scan report. 
The report details are stored as JSON*/ +CREATE TABLE scan_report +( + id SERIAL PRIMARY KEY NOT NULL, + uuid VARCHAR(64) UNIQUE NOT NULL, + digest VARCHAR(256) NOT NULL, + registration_uuid VARCHAR(64) NOT NULL, + mime_type VARCHAR(256) NOT NULL, + job_id VARCHAR(64), + track_id VARCHAR(64), + status VARCHAR(1024) NOT NULL, + status_code INTEGER DEFAULT 0, + status_rev BIGINT DEFAULT 0, + report JSON, + start_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + end_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + UNIQUE(digest, registration_uuid, mime_type) +); + +/** Add table for immutable tag **/ +CREATE TABLE immutable_tag_rule +( + id SERIAL PRIMARY KEY NOT NULL, + project_id int NOT NULL, + tag_filter text, + enabled boolean default true NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP +); + +ALTER TABLE robot ADD COLUMN visible boolean DEFAULT true NOT NULL; \ No newline at end of file diff --git a/make/photon/Makefile b/make/photon/Makefile index 8481fd7a7..76dde92de 100644 --- a/make/photon/Makefile +++ b/make/photon/Makefile @@ -1,5 +1,5 @@ # Makefile for a harbor project -# +# # Targets: # # build: build harbor photon images @@ -109,20 +109,20 @@ _build_db: _build_portal: @echo "building portal container for photon..." - $(DOCKERBUILD) -f $(DOCKERFILEPATH_PORTAL)/$(DOCKERFILENAME_PORTAL) -t $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) . + $(DOCKERBUILD) --build-arg npm_registry=$(NPM_REGISTRY) -f $(DOCKERFILEPATH_PORTAL)/$(DOCKERFILENAME_PORTAL) -t $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) . @echo "Done." -_build_core: +_build_core: @echo "building core container for photon..." @$(DOCKERBUILD) -f $(DOCKERFILEPATH_CORE)/$(DOCKERFILENAME_CORE) -t $(DOCKERIMAGENAME_CORE):$(VERSIONTAG) . @echo "Done." - -_build_jobservice: + +_build_jobservice: @echo "building jobservice container for photon..." @$(DOCKERBUILD) -f $(DOCKERFILEPATH_JOBSERVICE)/$(DOCKERFILENAME_JOBSERVICE) -t $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) . @echo "Done." 
-_build_log: +_build_log: @echo "building log container for photon..." $(DOCKERBUILD) -f $(DOCKERFILEPATH_LOG)/$(DOCKERFILENAME_LOG) -t $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) . @echo "Done." @@ -154,7 +154,7 @@ _build_chart_server: rm -rf $(DOCKERFILEPATH_CHART_SERVER)/binary; \ echo "Done." ; \ fi - + _build_nginx: @echo "building nginx container for photon..." @$(DOCKERBUILD) -f $(DOCKERFILEPATH_NGINX)/$(DOCKERFILENAME_NGINX) -t $(DOCKERIMAGENAME_NGINX):$(NGINXVERSION) . @@ -175,7 +175,7 @@ _build_notary: rm -rf $(DOCKERFILEPATH_NOTARY)/binary; \ echo "Done."; \ fi - + _build_registry: @if [ "$(BUILDBIN)" != "true" ] ; then \ rm -rf $(DOCKERFILEPATH_REG)/binary && mkdir -p $(DOCKERFILEPATH_REG)/binary && \ @@ -187,7 +187,7 @@ _build_registry: @chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(REGISTRYVERSION)-$(VERSIONTAG) . @echo "Done." -_build_registryctl: +_build_registryctl: @echo "building registry controller for photon..." @$(DOCKERBUILD) -f $(DOCKERFILEPATH_REGISTRYCTL)/$(DOCKERFILENAME_REGISTRYCTL) -t $(DOCKERIMAGENAME_REGISTRYCTL):$(VERSIONTAG) . 
@rm -rf $(DOCKERFILEPATH_REG)/binary @@ -217,7 +217,7 @@ cleanimage: - $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_CORE):$(VERSIONTAG) - $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) - $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) - + .PHONY: clean clean: cleanimage diff --git a/make/photon/portal/Dockerfile b/make/photon/portal/Dockerfile index 9f71410f7..9dc834413 100644 --- a/make/photon/portal/Dockerfile +++ b/make/photon/portal/Dockerfile @@ -1,20 +1,26 @@ FROM node:10.15.0 as nodeportal -COPY src/portal /portal_src -COPY ./docs/swagger.yaml /portal_src -COPY ./LICENSE /portal_src - WORKDIR /build_dir -RUN cp -r /portal_src/* /build_dir \ - && ls -la \ - && apt-get update \ +ARG npm_registry=https://registry.npmjs.org +ENV NPM_CONFIG_REGISTRY=${npm_registry} + +COPY src/portal/package.json /build_dir +COPY src/portal/package-lock.json /build_dir +COPY ./docs/swagger.yaml /build_dir + +RUN apt-get update \ && apt-get install -y --no-install-recommends python-yaml=3.12-1 \ && python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' < swagger.yaml > swagger.json \ - && npm install \ + && npm install + +COPY ./LICENSE /build_dir +COPY src/portal /build_dir + +RUN ls -la \ && npm run build_lib \ && npm run link_lib \ - && npm run release + && node --max_old_space_size=8192 'node_modules/@angular/cli/bin/ng' build --prod FROM photon:2.0 diff --git a/make/photon/prepare/main.py b/make/photon/prepare/main.py index e617baebc..bec165455 100644 --- a/make/photon/prepare/main.py +++ b/make/photon/prepare/main.py @@ -29,7 +29,7 @@ old_private_key_pem_path, old_crt_path) def main(conf, with_notary, with_clair, with_chartmuseum): delfile(config_dir) - config_dict = parse_yaml_config(conf) + config_dict = parse_yaml_config(conf, with_notary=with_notary, with_clair=with_clair, with_chartmuseum=with_chartmuseum) validate(config_dict, notary_mode=with_notary) prepare_log_configs(config_dict) diff --git 
a/make/photon/prepare/templates/nginx/nginx.http.conf.jinja b/make/photon/prepare/templates/nginx/nginx.http.conf.jinja index 09e1f4346..e80d6e9fd 100644 --- a/make/photon/prepare/templates/nginx/nginx.http.conf.jinja +++ b/make/photon/prepare/templates/nginx/nginx.http.conf.jinja @@ -39,6 +39,10 @@ http { # disable any limits to avoid HTTP 413 for large image uploads client_max_body_size 0; + # Add extra headers + add_header X-Frame-Options DENY; + add_header Content-Security-Policy "frame-ancestors 'none'"; + # costumized location config file can place to /etc/nginx/etc with prefix harbor.http. and suffix .conf include /etc/nginx/conf.d/harbor.http.*.conf; diff --git a/make/photon/prepare/templates/nginx/nginx.https.conf.jinja b/make/photon/prepare/templates/nginx/nginx.https.conf.jinja index e4ac93078..8db15c9c3 100644 --- a/make/photon/prepare/templates/nginx/nginx.https.conf.jinja +++ b/make/photon/prepare/templates/nginx/nginx.https.conf.jinja @@ -45,7 +45,7 @@ http { ssl_certificate_key {{ssl_cert_key}}; # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html - ssl_protocols TLSv1.1 TLSv1.2; + ssl_protocols TLSv1.2; ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; ssl_prefer_server_ciphers on; ssl_session_cache shared:SSL:10m; @@ -56,6 +56,11 @@ http { # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) chunked_transfer_encoding on; + # Add extra headers + add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload"; + add_header X-Frame-Options DENY; + add_header Content-Security-Policy "frame-ancestors 'none'"; + # costumized location config file can place to /etc/nginx dir with prefix harbor.https. and suffix .conf include /etc/nginx/conf.d/harbor.https.*.conf; @@ -68,8 +73,7 @@ http { # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings. 
proxy_set_header X-Forwarded-Proto $scheme; - # Add Secure flag when serving HTTPS - proxy_cookie_path / "/; secure"; + proxy_cookie_path / "/; HttpOnly; Secure"; proxy_buffering off; proxy_request_buffering off; @@ -83,7 +87,9 @@ http { # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings. proxy_set_header X-Forwarded-Proto $scheme; - + + proxy_cookie_path / "/; Secure"; + proxy_buffering off; proxy_request_buffering off; } @@ -96,6 +102,8 @@ http { # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings. proxy_set_header X-Forwarded-Proto $scheme; + + proxy_cookie_path / "/; Secure"; proxy_buffering off; proxy_request_buffering off; @@ -109,6 +117,8 @@ http { # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings. proxy_set_header X-Forwarded-Proto $scheme; + + proxy_cookie_path / "/; Secure"; proxy_buffering off; proxy_request_buffering off; @@ -139,6 +149,8 @@ http { # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings. 
proxy_set_header X-Forwarded-Proto $scheme; + proxy_cookie_path / "/; Secure"; + proxy_buffering off; proxy_request_buffering off; } diff --git a/make/photon/prepare/utils/configs.py b/make/photon/prepare/utils/configs.py index 57f8554c7..f43dcbe87 100644 --- a/make/photon/prepare/utils/configs.py +++ b/make/photon/prepare/utils/configs.py @@ -56,7 +56,7 @@ def parse_versions(): versions = yaml.load(f) return versions -def parse_yaml_config(config_file_path): +def parse_yaml_config(config_file_path, with_notary, with_clair, with_chartmuseum): ''' :param configs: config_parser object :returns: dict of configs @@ -117,27 +117,31 @@ def parse_yaml_config(config_file_path): config_dict['harbor_db_sslmode'] = 'disable' config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_db_max_idle_conns config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_db_max_open_conns - # clari db - config_dict['clair_db_host'] = 'postgresql' - config_dict['clair_db_port'] = 5432 - config_dict['clair_db_name'] = 'postgres' - config_dict['clair_db_username'] = 'postgres' - config_dict['clair_db_password'] = db_configs.get("password") or '' - config_dict['clair_db_sslmode'] = 'disable' - # notary signer - config_dict['notary_signer_db_host'] = 'postgresql' - config_dict['notary_signer_db_port'] = 5432 - config_dict['notary_signer_db_name'] = 'notarysigner' - config_dict['notary_signer_db_username'] = 'signer' - config_dict['notary_signer_db_password'] = 'password' - config_dict['notary_signer_db_sslmode'] = 'disable' - # notary server - config_dict['notary_server_db_host'] = 'postgresql' - config_dict['notary_server_db_port'] = 5432 - config_dict['notary_server_db_name'] = 'notaryserver' - config_dict['notary_server_db_username'] = 'server' - config_dict['notary_server_db_password'] = 'password' - config_dict['notary_server_db_sslmode'] = 'disable' + + if with_clair: + # clair db + config_dict['clair_db_host'] = 'postgresql' + 
config_dict['clair_db_port'] = 5432 + config_dict['clair_db_name'] = 'postgres' + config_dict['clair_db_username'] = 'postgres' + config_dict['clair_db_password'] = db_configs.get("password") or '' + config_dict['clair_db_sslmode'] = 'disable' + + if with_notary: + # notary signer + config_dict['notary_signer_db_host'] = 'postgresql' + config_dict['notary_signer_db_port'] = 5432 + config_dict['notary_signer_db_name'] = 'notarysigner' + config_dict['notary_signer_db_username'] = 'signer' + config_dict['notary_signer_db_password'] = 'password' + config_dict['notary_signer_db_sslmode'] = 'disable' + # notary server + config_dict['notary_server_db_host'] = 'postgresql' + config_dict['notary_server_db_port'] = 5432 + config_dict['notary_server_db_name'] = 'notaryserver' + config_dict['notary_server_db_username'] = 'server' + config_dict['notary_server_db_password'] = 'password' + config_dict['notary_server_db_sslmode'] = 'disable' # Data path volume @@ -240,27 +244,30 @@ def parse_yaml_config(config_file_path): config_dict['harbor_db_sslmode'] = external_db_configs['harbor']['ssl_mode'] config_dict['harbor_db_max_idle_conns'] = external_db_configs['harbor'].get("max_idle_conns") or default_db_max_idle_conns config_dict['harbor_db_max_open_conns'] = external_db_configs['harbor'].get("max_open_conns") or default_db_max_open_conns - # clair db - config_dict['clair_db_host'] = external_db_configs['clair']['host'] - config_dict['clair_db_port'] = external_db_configs['clair']['port'] - config_dict['clair_db_name'] = external_db_configs['clair']['db_name'] - config_dict['clair_db_username'] = external_db_configs['clair']['username'] - config_dict['clair_db_password'] = external_db_configs['clair']['password'] - config_dict['clair_db_sslmode'] = external_db_configs['clair']['ssl_mode'] - # notary signer - config_dict['notary_signer_db_host'] = external_db_configs['notary_signer']['host'] - config_dict['notary_signer_db_port'] = external_db_configs['notary_signer']['port'] - 
config_dict['notary_signer_db_name'] = external_db_configs['notary_signer']['db_name'] - config_dict['notary_signer_db_username'] = external_db_configs['notary_signer']['username'] - config_dict['notary_signer_db_password'] = external_db_configs['notary_signer']['password'] - config_dict['notary_signer_db_sslmode'] = external_db_configs['notary_signer']['ssl_mode'] - # notary server - config_dict['notary_server_db_host'] = external_db_configs['notary_server']['host'] - config_dict['notary_server_db_port'] = external_db_configs['notary_server']['port'] - config_dict['notary_server_db_name'] = external_db_configs['notary_server']['db_name'] - config_dict['notary_server_db_username'] = external_db_configs['notary_server']['username'] - config_dict['notary_server_db_password'] = external_db_configs['notary_server']['password'] - config_dict['notary_server_db_sslmode'] = external_db_configs['notary_server']['ssl_mode'] + + if with_clair: + # clair db + config_dict['clair_db_host'] = external_db_configs['clair']['host'] + config_dict['clair_db_port'] = external_db_configs['clair']['port'] + config_dict['clair_db_name'] = external_db_configs['clair']['db_name'] + config_dict['clair_db_username'] = external_db_configs['clair']['username'] + config_dict['clair_db_password'] = external_db_configs['clair']['password'] + config_dict['clair_db_sslmode'] = external_db_configs['clair']['ssl_mode'] + if with_notary: + # notary signer + config_dict['notary_signer_db_host'] = external_db_configs['notary_signer']['host'] + config_dict['notary_signer_db_port'] = external_db_configs['notary_signer']['port'] + config_dict['notary_signer_db_name'] = external_db_configs['notary_signer']['db_name'] + config_dict['notary_signer_db_username'] = external_db_configs['notary_signer']['username'] + config_dict['notary_signer_db_password'] = external_db_configs['notary_signer']['password'] + config_dict['notary_signer_db_sslmode'] = external_db_configs['notary_signer']['ssl_mode'] + # notary 
server + config_dict['notary_server_db_host'] = external_db_configs['notary_server']['host'] + config_dict['notary_server_db_port'] = external_db_configs['notary_server']['port'] + config_dict['notary_server_db_name'] = external_db_configs['notary_server']['db_name'] + config_dict['notary_server_db_username'] = external_db_configs['notary_server']['username'] + config_dict['notary_server_db_password'] = external_db_configs['notary_server']['password'] + config_dict['notary_server_db_sslmode'] = external_db_configs['notary_server']['ssl_mode'] else: config_dict['external_database'] = False diff --git a/make/photon/registry/builder b/make/photon/registry/builder index 67ea71ede..48ec5b3dd 100755 --- a/make/photon/registry/builder +++ b/make/photon/registry/builder @@ -22,6 +22,13 @@ cur=$PWD TEMP=`mktemp -d /$TMPDIR/distribution.XXXXXX` git clone -b $VERSION https://github.com/docker/distribution.git $TEMP +# add patch 2879 +echo 'add patch https://github.com/docker/distribution/pull/2879 ...' +cd $TEMP +wget https://github.com/docker/distribution/pull/2879.patch +git apply 2879.patch +cd $cur + echo 'build the registry binary bases on the golang:1.11...' 
cp Dockerfile.binary $TEMP docker build -f $TEMP/Dockerfile.binary -t registry-golang $TEMP diff --git a/src/common/config/metadata/metadatalist.go b/src/common/config/metadata/metadatalist.go index 7106a38c6..bf0b70872 100644 --- a/src/common/config/metadata/metadatalist.go +++ b/src/common/config/metadata/metadatalist.go @@ -143,6 +143,7 @@ var ( {Name: common.OIDCEndpoint, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, {Name: common.OIDCCLientID, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, {Name: common.OIDCClientSecret, Scope: UserScope, Group: OIDCGroup, ItemType: &PasswordType{}}, + {Name: common.OIDCGroupsClaim, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, {Name: common.OIDCScope, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, {Name: common.OIDCVerifyCert, Scope: UserScope, Group: OIDCGroup, DefaultValue: "true", ItemType: &BoolType{}}, diff --git a/src/common/const.go b/src/common/const.go index 850bb8a65..45c2e2692 100755 --- a/src/common/const.go +++ b/src/common/const.go @@ -109,6 +109,7 @@ const ( OIDCCLientID = "oidc_client_id" OIDCClientSecret = "oidc_client_secret" OIDCVerifyCert = "oidc_verify_cert" + OIDCGroupsClaim = "oidc_groups_claim" OIDCScope = "oidc_scope" DefaultClairEndpoint = "http://clair:6060" @@ -125,13 +126,14 @@ const ( DefaultNotaryEndpoint = "http://notary-server:4443" LDAPGroupType = 1 HTTPGroupType = 2 + OIDCGroupType = 3 LDAPGroupAdminDn = "ldap_group_admin_dn" LDAPGroupMembershipAttribute = "ldap_group_membership_attribute" DefaultRegistryControllerEndpoint = "http://registryctl:8080" WithChartMuseum = "with_chartmuseum" ChartRepoURL = "chart_repository_url" DefaultChartRepoURL = "http://chartmuseum:9999" - DefaultPortalURL = "http://portal" + DefaultPortalURL = "http://portal:8080" DefaultRegistryCtlURL = "http://registryctl:8080" DefaultClairHealthCheckServerURL = "http://clair:6061" // Use this prefix to distinguish harbor user, the prefix contains a special 
character($), so it cannot be registered as a harbor user. diff --git a/src/common/dao/dao_test.go b/src/common/dao/dao_test.go index bc070245a..1725fdab4 100644 --- a/src/common/dao/dao_test.go +++ b/src/common/dao/dao_test.go @@ -324,7 +324,12 @@ func TestResetUserPassword(t *testing.T) { t.Errorf("Error occurred in UpdateUserResetUuid: %v", err) } - err = ResetUserPassword(models.User{UserID: currentUser.UserID, Password: "HarborTester12345", ResetUUID: uuid, Salt: currentUser.Salt}) + err = ResetUserPassword( + models.User{ + UserID: currentUser.UserID, + PasswordVersion: utils.SHA256, + ResetUUID: uuid, + Salt: currentUser.Salt}, "HarborTester12345") if err != nil { t.Errorf("Error occurred in ResetUserPassword: %v", err) } @@ -346,7 +351,12 @@ func TestChangeUserPassword(t *testing.T) { t.Errorf("Error occurred when get user salt") } currentUser.Salt = query.Salt - err = ChangeUserPassword(models.User{UserID: currentUser.UserID, Password: "NewHarborTester12345", Salt: currentUser.Salt}) + err = ChangeUserPassword( + models.User{ + UserID: currentUser.UserID, + Password: "NewHarborTester12345", + PasswordVersion: utils.SHA256, + Salt: currentUser.Salt}) if err != nil { t.Errorf("Error occurred in ChangeUserPassword: %v", err) } diff --git a/src/common/dao/project_blob.go b/src/common/dao/project_blob.go index 6191cd26e..e6f2e47e8 100644 --- a/src/common/dao/project_blob.go +++ b/src/common/dao/project_blob.go @@ -54,7 +54,7 @@ func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) { }) } - cnt, err := GetOrmer().InsertMulti(10, projectBlobs) + cnt, err := GetOrmer().InsertMulti(100, projectBlobs) if err != nil { if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { return cnt, ErrDupRows @@ -121,7 +121,7 @@ func CountSizeOfProject(pid int64) (int64, error) { var blobs []models.Blob sql := ` -SELECT +SELECT DISTINCT bb.digest, bb.id, bb.content_type, @@ -132,7 +132,7 @@ JOIN artifact_blob afnb ON af.digest = 
afnb.digest_af JOIN BLOB bb ON afnb.digest_blob = bb.digest -WHERE af.project_id = ? +WHERE af.project_id = ? AND bb.content_type != ? ` _, err := GetOrmer().Raw(sql, pid, common.ForeignLayer).QueryRows(&blobs) @@ -152,7 +152,7 @@ AND bb.content_type != ? func RemoveUntaggedBlobs(pid int64) error { var blobs []models.Blob sql := ` -SELECT +SELECT DISTINCT bb.digest, bb.id, bb.content_type, @@ -163,7 +163,7 @@ JOIN artifact_blob afnb ON af.digest = afnb.digest_af JOIN BLOB bb ON afnb.digest_blob = bb.digest -WHERE af.project_id = ? +WHERE af.project_id = ? ` _, err := GetOrmer().Raw(sql, pid).QueryRows(&blobs) if len(blobs) == 0 { diff --git a/src/common/dao/project_blob_test.go b/src/common/dao/project_blob_test.go index 30302c315..67f91309c 100644 --- a/src/common/dao/project_blob_test.go +++ b/src/common/dao/project_blob_test.go @@ -49,19 +49,20 @@ func TestAddBlobsToProject(t *testing.T) { OwnerID: 1, }) require.Nil(t, err) + defer DeleteProject(pid) - for i := 0; i < 88888; i++ { + blobsCount := 88888 + for i := 0; i < blobsCount; i++ { blob := &models.Blob{ + ID: int64(100000 + i), // Use fake id to speed this test Digest: digest.FromString(utils.GenerateRandomString()).String(), Size: 100, } - _, err := AddBlob(blob) - require.Nil(t, err) blobs = append(blobs, blob) } cnt, err := AddBlobsToProject(pid, blobs...) require.Nil(t, err) - require.Equal(t, cnt, int64(88888)) + require.Equal(t, cnt, int64(blobsCount)) } func TestHasBlobInProject(t *testing.T) { diff --git a/src/common/dao/register.go b/src/common/dao/register.go index 7f3062153..fa6a8ac91 100644 --- a/src/common/dao/register.go +++ b/src/common/dao/register.go @@ -29,10 +29,10 @@ func Register(user models.User) (int64, error) { now := time.Now() salt := utils.GenerateRandomString() sql := `insert into harbor_user - (username, password, realname, email, comment, salt, sysadmin_flag, creation_time, update_time) - values (?, ?, ?, ?, ?, ?, ?, ?, ?) 
RETURNING user_id` + (username, password, password_version, realname, email, comment, salt, sysadmin_flag, creation_time, update_time) + values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) RETURNING user_id` var userID int64 - err := o.Raw(sql, user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email, + err := o.Raw(sql, user.Username, utils.Encrypt(user.Password, salt, utils.SHA256), utils.SHA256, user.Realname, user.Email, user.Comment, salt, user.HasAdminRole, now, now).QueryRow(&userID) if err != nil { return 0, err diff --git a/src/common/dao/robot.go b/src/common/dao/robot.go deleted file mode 100644 index 0d8b5c7f1..000000000 --- a/src/common/dao/robot.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dao - -import ( - "github.com/astaxie/beego/orm" - "github.com/goharbor/harbor/src/common/models" - "strings" - "time" -) - -// AddRobot ... -func AddRobot(robot *models.Robot) (int64, error) { - now := time.Now() - robot.CreationTime = now - robot.UpdateTime = now - id, err := GetOrmer().Insert(robot) - if err != nil { - if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { - return 0, ErrDupRows - } - return 0, err - } - return id, nil -} - -// GetRobotByID ... 
-func GetRobotByID(id int64) (*models.Robot, error) { - robot := &models.Robot{ - ID: id, - } - if err := GetOrmer().Read(robot); err != nil { - if err == orm.ErrNoRows { - return nil, nil - } - return nil, err - } - - return robot, nil -} - -// ListRobots list robots according to the query conditions -func ListRobots(query *models.RobotQuery) ([]*models.Robot, error) { - qs := getRobotQuerySetter(query).OrderBy("Name") - if query != nil { - if query.Size > 0 { - qs = qs.Limit(query.Size) - if query.Page > 0 { - qs = qs.Offset((query.Page - 1) * query.Size) - } - } - } - robots := []*models.Robot{} - _, err := qs.All(&robots) - return robots, err -} - -func getRobotQuerySetter(query *models.RobotQuery) orm.QuerySeter { - qs := GetOrmer().QueryTable(&models.Robot{}) - - if query == nil { - return qs - } - - if len(query.Name) > 0 { - if query.FuzzyMatchName { - qs = qs.Filter("Name__icontains", query.Name) - } else { - qs = qs.Filter("Name", query.Name) - } - } - if query.ProjectID != 0 { - qs = qs.Filter("ProjectID", query.ProjectID) - } - return qs -} - -// CountRobot ... -func CountRobot(query *models.RobotQuery) (int64, error) { - return getRobotQuerySetter(query).Count() -} - -// UpdateRobot ... -func UpdateRobot(robot *models.Robot) error { - robot.UpdateTime = time.Now() - _, err := GetOrmer().Update(robot) - return err -} - -// DeleteRobot ... -func DeleteRobot(id int64) error { - _, err := GetOrmer().QueryTable(&models.Robot{}).Filter("ID", id).Delete() - return err -} diff --git a/src/common/dao/robot_test.go b/src/common/dao/robot_test.go deleted file mode 100644 index 0ffbcf081..000000000 --- a/src/common/dao/robot_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dao - -import ( - "testing" - - "github.com/goharbor/harbor/src/common/models" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestAddRobot(t *testing.T) { - robotName := "test1" - robot := &models.Robot{ - Name: robotName, - Description: "test1 description", - ProjectID: 1, - } - - // add - id, err := AddRobot(robot) - require.Nil(t, err) - robot.ID = id - - require.Nil(t, err) - assert.NotNil(t, id) - -} - -func TestGetRobot(t *testing.T) { - robotName := "test2" - robot := &models.Robot{ - Name: robotName, - Description: "test2 description", - ProjectID: 1, - } - - // add - id, err := AddRobot(robot) - require.Nil(t, err) - robot.ID = id - - robot, err = GetRobotByID(id) - require.Nil(t, err) - assert.Equal(t, robotName, robot.Name) - -} - -func TestListRobots(t *testing.T) { - robotName := "test3" - robot := &models.Robot{ - Name: robotName, - Description: "test3 description", - ProjectID: 1, - } - - _, err := AddRobot(robot) - require.Nil(t, err) - - robots, err := ListRobots(&models.RobotQuery{ - ProjectID: 1, - }) - require.Nil(t, err) - assert.Equal(t, 3, len(robots)) - -} - -func TestDisableRobot(t *testing.T) { - robotName := "test4" - robot := &models.Robot{ - Name: robotName, - Description: "test4 description", - ProjectID: 1, - } - - // add - id, err := AddRobot(robot) - require.Nil(t, err) - - // Disable - robot.Disabled = true - err = UpdateRobot(robot) - require.Nil(t, err) - - // Get - robot, err = GetRobotByID(id) - require.Nil(t, err) - assert.Equal(t, true, robot.Disabled) - 
-} - -func TestEnableRobot(t *testing.T) { - robotName := "test5" - robot := &models.Robot{ - Name: robotName, - Description: "test5 description", - Disabled: true, - ProjectID: 1, - } - - // add - id, err := AddRobot(robot) - require.Nil(t, err) - - // Disable - robot.Disabled = false - err = UpdateRobot(robot) - require.Nil(t, err) - - // Get - robot, err = GetRobotByID(id) - require.Nil(t, err) - assert.Equal(t, false, robot.Disabled) - -} - -func TestDeleteRobot(t *testing.T) { - robotName := "test6" - robot := &models.Robot{ - Name: robotName, - Description: "test6 description", - ProjectID: 1, - } - - // add - id, err := AddRobot(robot) - require.Nil(t, err) - - // Disable - err = DeleteRobot(id) - require.Nil(t, err) - - // Get - robot, err = GetRobotByID(id) - assert.Nil(t, robot) - -} - -func TestListAllRobot(t *testing.T) { - - robots, err := ListRobots(nil) - require.Nil(t, err) - assert.Equal(t, 5, len(robots)) - -} diff --git a/src/common/dao/user.go b/src/common/dao/user.go index 535887b1e..0417b44ab 100644 --- a/src/common/dao/user.go +++ b/src/common/dao/user.go @@ -23,7 +23,6 @@ import ( "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils" - "github.com/goharbor/harbor/src/common/utils/log" ) @@ -32,7 +31,7 @@ func GetUser(query models.User) (*models.User, error) { o := GetOrmer() - sql := `select user_id, username, password, email, realname, comment, reset_uuid, salt, + sql := `select user_id, username, password, password_version, email, realname, comment, reset_uuid, salt, sysadmin_flag, creation_time, update_time from harbor_user u where deleted = false ` @@ -76,9 +75,9 @@ func GetUser(query models.User) (*models.User, error) { // LoginByDb is used for user to login with database auth mode. func LoginByDb(auth models.AuthModel) (*models.User, error) { + var users []models.User o := GetOrmer() - var users []models.User n, err := o.Raw(`select * from harbor_user where (username = ? or email = ?) 
and deleted = false`, auth.Principal, auth.Principal).QueryRows(&users) if err != nil { @@ -90,12 +89,10 @@ func LoginByDb(auth models.AuthModel) (*models.User, error) { user := users[0] - if user.Password != utils.Encrypt(auth.Password, user.Salt) { + if !matchPassword(&user, auth.Password) { return nil, nil } - user.Password = "" // do not return the password - return &user, nil } @@ -165,23 +162,34 @@ func ToggleUserAdminRole(userID int, hasAdmin bool) error { func ChangeUserPassword(u models.User) error { u.UpdateTime = time.Now() u.Salt = utils.GenerateRandomString() - u.Password = utils.Encrypt(u.Password, u.Salt) - _, err := GetOrmer().Update(&u, "Password", "Salt", "UpdateTime") + u.Password = utils.Encrypt(u.Password, u.Salt, utils.SHA256) + var err error + if u.PasswordVersion == utils.SHA1 { + u.PasswordVersion = utils.SHA256 + _, err = GetOrmer().Update(&u, "Password", "PasswordVersion", "Salt", "UpdateTime") + } else { + _, err = GetOrmer().Update(&u, "Password", "Salt", "UpdateTime") + } return err } // ResetUserPassword ... -func ResetUserPassword(u models.User) error { - o := GetOrmer() - r, err := o.Raw(`update harbor_user set password=?, reset_uuid=? 
where reset_uuid=?`, utils.Encrypt(u.Password, u.Salt), "", u.ResetUUID).Exec() +func ResetUserPassword(u models.User, rawPassword string) error { + var rowsAffected int64 + var err error + u.UpdateTime = time.Now() + u.Password = utils.Encrypt(rawPassword, u.Salt, utils.SHA256) + u.ResetUUID = "" + if u.PasswordVersion == utils.SHA1 { + u.PasswordVersion = utils.SHA256 + rowsAffected, err = GetOrmer().Update(&u, "Password", "PasswordVersion", "ResetUUID", "UpdateTime") + } else { + rowsAffected, err = GetOrmer().Update(&u, "Password", "ResetUUID", "UpdateTime") + } if err != nil { return err } - count, err := r.RowsAffected() - if err != nil { - return err - } - if count == 0 { + if rowsAffected == 0 { return errors.New("no record be changed, reset password failed") } return nil @@ -282,3 +290,11 @@ func CleanUser(id int64) error { } return nil } + +// MatchPassword returns true if password matched +func matchPassword(u *models.User, password string) bool { + if u.Password != utils.Encrypt(password, u.Salt, u.PasswordVersion) { + return false + } + return true +} diff --git a/src/common/job/client.go b/src/common/job/client.go index 01f3c18e2..3c5e060ac 100644 --- a/src/common/job/client.go +++ b/src/common/job/client.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "net/http" + "regexp" "strings" commonhttp "github.com/goharbor/harbor/src/common/http" @@ -18,7 +19,9 @@ import ( var ( // GlobalClient is an instance of the default client that can be used globally // Notes: the client needs to be initialized before can be used - GlobalClient Client + GlobalClient Client + statusBehindErrorPattern = "mismatch job status for stopping job: .*, job status (.*) is behind Running" + statusBehindErrorReg = regexp.MustCompile(statusBehindErrorPattern) ) // Client wraps interface to access jobservice. @@ -30,6 +33,21 @@ type Client interface { // TODO Redirect joblog when we see there's memory issue. 
} +// StatusBehindError represents the error got when trying to stop a success/failed job +type StatusBehindError struct { + status string +} + +// Error returns the detail message about the error +func (s *StatusBehindError) Error() string { + return "status behind error" +} + +// Status returns the current status of the job +func (s *StatusBehindError) Status() string { + return s.status +} + // DefaultClient is the default implementation of Client interface type DefaultClient struct { endpoint string @@ -156,5 +174,25 @@ func (d *DefaultClient) PostAction(uuid, action string) error { }{ Action: action, } - return d.client.Post(url, req) + if err := d.client.Post(url, req); err != nil { + status, flag := isStatusBehindError(err) + if flag { + return &StatusBehindError{ + status: status, + } + } + return err + } + return nil +} + +func isStatusBehindError(err error) (string, bool) { + if err == nil { + return "", false + } + strs := statusBehindErrorReg.FindStringSubmatch(err.Error()) + if len(strs) != 2 { + return "", false + } + return strs[1], true } diff --git a/src/common/job/client_test.go b/src/common/job/client_test.go index 8dd208841..53dfa5fe8 100644 --- a/src/common/job/client_test.go +++ b/src/common/job/client_test.go @@ -1,11 +1,13 @@ package job import ( + "errors" + "os" + "testing" + "github.com/goharbor/harbor/src/common/job/models" "github.com/goharbor/harbor/src/common/job/test" "github.com/stretchr/testify/assert" - "os" - "testing" ) var ( @@ -62,3 +64,20 @@ func TestPostAction(t *testing.T) { err2 := testClient.PostAction(ID, "stop") assert.Nil(err2) } + +func TestIsStatusBehindError(t *testing.T) { + // nil error + status, flag := isStatusBehindError(nil) + assert.False(t, flag) + + // not status behind error + err := errors.New("not status behind error") + status, flag = isStatusBehindError(err) + assert.False(t, flag) + + // status behind error + err = errors.New("mismatch job status for stopping job: 9feedf9933jffs, job status Error is 
behind Running") + status, flag = isStatusBehindError(err) + assert.True(t, flag) + assert.Equal(t, "Error", status) +} diff --git a/src/common/models/base.go b/src/common/models/base.go index de04d0285..c08b0f37c 100644 --- a/src/common/models/base.go +++ b/src/common/models/base.go @@ -35,7 +35,6 @@ func init() { new(UserGroup), new(AdminJob), new(JobLog), - new(Robot), new(OIDCUser), new(NotificationPolicy), new(NotificationJob), diff --git a/src/common/models/config.go b/src/common/models/config.go index dfd13d4bb..3f22e1b94 100644 --- a/src/common/models/config.go +++ b/src/common/models/config.go @@ -82,6 +82,7 @@ type OIDCSetting struct { VerifyCert bool `json:"verify_cert"` ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` + GroupsClaim string `json:"groups_claim"` RedirectURL string `json:"redirect_url"` Scope []string `json:"scope"` } diff --git a/src/common/models/repo.go b/src/common/models/repo.go index 9993fbcc6..6562e9531 100644 --- a/src/common/models/repo.go +++ b/src/common/models/repo.go @@ -54,11 +54,11 @@ type RepositoryQuery struct { // TagResp holds the information of one image tag type TagResp struct { TagDetail - Signature *model.Target `json:"signature"` - ScanOverview *ImgScanOverview `json:"scan_overview,omitempty"` - Labels []*Label `json:"labels"` - PushTime time.Time `json:"push_time"` - PullTime time.Time `json:"pull_time"` + Signature *model.Target `json:"signature"` + ScanOverview map[string]interface{} `json:"scan_overview,omitempty"` + Labels []*Label `json:"labels"` + PushTime time.Time `json:"push_time"` + PullTime time.Time `json:"pull_time"` } // TagDetail ... diff --git a/src/common/models/user.go b/src/common/models/user.go index c4299869f..c47ad03b8 100644 --- a/src/common/models/user.go +++ b/src/common/models/user.go @@ -23,14 +23,15 @@ const UserTable = "harbor_user" // User holds the details of a user. 
type User struct { - UserID int `orm:"pk;auto;column(user_id)" json:"user_id"` - Username string `orm:"column(username)" json:"username"` - Email string `orm:"column(email)" json:"email"` - Password string `orm:"column(password)" json:"password"` - Realname string `orm:"column(realname)" json:"realname"` - Comment string `orm:"column(comment)" json:"comment"` - Deleted bool `orm:"column(deleted)" json:"deleted"` - Rolename string `orm:"-" json:"role_name"` + UserID int `orm:"pk;auto;column(user_id)" json:"user_id"` + Username string `orm:"column(username)" json:"username"` + Email string `orm:"column(email)" json:"email"` + Password string `orm:"column(password)" json:"password"` + PasswordVersion string `orm:"column(password_version)" json:"password_version"` + Realname string `orm:"column(realname)" json:"realname"` + Comment string `orm:"column(comment)" json:"comment"` + Deleted bool `orm:"column(deleted)" json:"deleted"` + Rolename string `orm:"-" json:"role_name"` // if this field is named as "RoleID", beego orm can not map role_id // to it. 
Role int `orm:"-" json:"role_id"` diff --git a/src/common/rbac/const.go b/src/common/rbac/const.go index 6cadbddef..6b850b6e9 100755 --- a/src/common/rbac/const.go +++ b/src/common/rbac/const.go @@ -49,13 +49,15 @@ const ( ResourceReplicationTask = Resource("replication-task") ResourceRepository = Resource("repository") ResourceTagRetention = Resource("tag-retention") + ResourceImmutableTag = Resource("immutable-tag") ResourceRepositoryLabel = Resource("repository-label") ResourceRepositoryTag = Resource("repository-tag") ResourceRepositoryTagLabel = Resource("repository-tag-label") ResourceRepositoryTagManifest = Resource("repository-tag-manifest") - ResourceRepositoryTagScanJob = Resource("repository-tag-scan-job") - ResourceRepositoryTagVulnerability = Resource("repository-tag-vulnerability") + ResourceRepositoryTagScanJob = Resource("repository-tag-scan-job") // TODO: remove + ResourceRepositoryTagVulnerability = Resource("repository-tag-vulnerability") // TODO: remove ResourceRobot = Resource("robot") ResourceNotificationPolicy = Resource("notification-policy") + ResourceScan = Resource("scan") ResourceSelf = Resource("") // subresource for self ) diff --git a/src/common/rbac/project/util.go b/src/common/rbac/project/util.go index 3de3f5810..85116fe21 100644 --- a/src/common/rbac/project/util.go +++ b/src/common/rbac/project/util.go @@ -95,6 +95,11 @@ var ( {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList}, {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionList}, + {Resource: rbac.ResourceLabel, Action: rbac.ActionCreate}, {Resource: rbac.ResourceLabel, Action: rbac.ActionRead}, {Resource: rbac.ResourceLabel, Action: rbac.ActionUpdate}, @@ -157,6 +162,9 @@ var ( {Resource: 
rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete}, {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList}, {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead}, + + {Resource: rbac.ResourceScan, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceScan, Action: rbac.ActionRead}, } ) diff --git a/src/common/rbac/project/visitor_role.go b/src/common/rbac/project/visitor_role.go index 36202a602..d8d594f4f 100755 --- a/src/common/rbac/project/visitor_role.go +++ b/src/common/rbac/project/visitor_role.go @@ -68,6 +68,11 @@ var ( {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList}, {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionList}, + {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList}, @@ -114,6 +119,9 @@ var ( {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete}, {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList}, {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead}, + + {Resource: rbac.ResourceScan, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceScan, Action: rbac.ActionRead}, }, "master": { @@ -153,6 +161,11 @@ var ( {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList}, {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceImmutableTag, Action: rbac.ActionList}, + {Resource: 
rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList}, @@ -191,6 +204,9 @@ var ( {Resource: rbac.ResourceRobot, Action: rbac.ActionList}, {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList}, + + {Resource: rbac.ResourceScan, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceScan, Action: rbac.ActionRead}, }, "developer": { @@ -241,6 +257,9 @@ var ( {Resource: rbac.ResourceRobot, Action: rbac.ActionRead}, {Resource: rbac.ResourceRobot, Action: rbac.ActionList}, + + {Resource: rbac.ResourceScan, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceScan, Action: rbac.ActionRead}, }, "guest": { diff --git a/src/common/rbac/rbac.go b/src/common/rbac/rbac.go index 45d91dcfe..62f0a2d5d 100644 --- a/src/common/rbac/rbac.go +++ b/src/common/rbac/rbac.go @@ -110,6 +110,10 @@ func (p *Policy) GetEffect() string { return eft.String() } +func (p *Policy) String() string { + return p.Resource.String() + ":" + p.Action.String() + ":" + p.GetEffect() +} + // Role the interface of rbac role type Role interface { // GetRoleName returns the role identity, if empty string role's policies will be ignore diff --git a/src/common/security/robot/context.go b/src/common/security/robot/context.go index 8fc622fe0..43ebf5921 100644 --- a/src/common/security/robot/context.go +++ b/src/common/security/robot/context.go @@ -18,17 +18,18 @@ import ( "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/core/promgr" + "github.com/goharbor/harbor/src/pkg/robot/model" ) // SecurityContext implements security.Context interface based on database type SecurityContext struct { - robot *models.Robot + robot *model.Robot pm promgr.ProjectManager policy []*rbac.Policy } // NewSecurityContext ... 
-func NewSecurityContext(robot *models.Robot, pm promgr.ProjectManager, policy []*rbac.Policy) *SecurityContext { +func NewSecurityContext(robot *model.Robot, pm promgr.ProjectManager, policy []*rbac.Policy) *SecurityContext { return &SecurityContext{ robot: robot, pm: pm, diff --git a/src/common/security/robot/context_test.go b/src/common/security/robot/context_test.go index 36a8a5316..a16cd40fe 100644 --- a/src/common/security/robot/context_test.go +++ b/src/common/security/robot/context_test.go @@ -26,6 +26,7 @@ import ( "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/promgr" "github.com/goharbor/harbor/src/core/promgr/pmsdriver/local" + "github.com/goharbor/harbor/src/pkg/robot/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -96,7 +97,7 @@ func TestIsAuthenticated(t *testing.T) { assert.False(t, ctx.IsAuthenticated()) // authenticated - ctx = NewSecurityContext(&models.Robot{ + ctx = NewSecurityContext(&model.Robot{ Name: "test", Disabled: false, }, nil, nil) @@ -109,7 +110,7 @@ func TestGetUsername(t *testing.T) { assert.Equal(t, "", ctx.GetUsername()) // authenticated - ctx = NewSecurityContext(&models.Robot{ + ctx = NewSecurityContext(&model.Robot{ Name: "test", Disabled: false, }, nil, nil) @@ -122,7 +123,7 @@ func TestIsSysAdmin(t *testing.T) { assert.False(t, ctx.IsSysAdmin()) // authenticated, non admin - ctx = NewSecurityContext(&models.Robot{ + ctx = NewSecurityContext(&model.Robot{ Name: "test", Disabled: false, }, nil, nil) @@ -141,7 +142,7 @@ func TestHasPullPerm(t *testing.T) { Action: rbac.ActionPull, }, } - robot := &models.Robot{ + robot := &model.Robot{ Name: "test_robot_1", Description: "desc", } @@ -158,7 +159,7 @@ func TestHasPushPerm(t *testing.T) { Action: rbac.ActionPush, }, } - robot := &models.Robot{ + robot := &model.Robot{ Name: "test_robot_2", Description: "desc", } @@ -179,7 +180,7 @@ func TestHasPushPullPerm(t *testing.T) { Action: rbac.ActionPull, }, } - 
robot := &models.Robot{ + robot := &model.Robot{ Name: "test_robot_3", Description: "desc", } diff --git a/src/common/security/robot/robot.go b/src/common/security/robot/robot.go index 9bfec53a9..c900f672d 100644 --- a/src/common/security/robot/robot.go +++ b/src/common/security/robot/robot.go @@ -9,7 +9,7 @@ import ( type robot struct { username string namespace rbac.Namespace - policy []*rbac.Policy + policies []*rbac.Policy } // GetUserName get the robot name. @@ -23,7 +23,7 @@ func (r *robot) GetPolicies() []*rbac.Policy { if r.namespace.IsPublic() { policies = append(policies, project.PoliciesForPublicProject(r.namespace)...) } - policies = append(policies, r.policy...) + policies = append(policies, r.policies...) return policies } @@ -33,10 +33,30 @@ func (r *robot) GetRoles() []rbac.Role { } // NewRobot ... -func NewRobot(username string, namespace rbac.Namespace, policy []*rbac.Policy) rbac.User { +func NewRobot(username string, namespace rbac.Namespace, policies []*rbac.Policy) rbac.User { return &robot{ username: username, namespace: namespace, - policy: policy, + policies: filterPolicies(namespace, policies), } } + +func filterPolicies(namespace rbac.Namespace, policies []*rbac.Policy) []*rbac.Policy { + var results []*rbac.Policy + if len(policies) == 0 { + return results + } + + mp := map[string]bool{} + for _, policy := range project.GetAllPolicies(namespace) { + mp[policy.String()] = true + } + + for _, policy := range policies { + if mp[policy.String()] { + results = append(results, policy) + } + } + + return results +} diff --git a/src/common/security/robot/robot_test.go b/src/common/security/robot/robot_test.go index ba89fac41..617c4c3fa 100644 --- a/src/common/security/robot/robot_test.go +++ b/src/common/security/robot/robot_test.go @@ -33,10 +33,21 @@ func TestGetPolicies(t *testing.T) { robot := robot{ username: "test", namespace: rbac.NewProjectNamespace(1, false), - policy: policies, + policies: policies, } assert.Equal(t, 
robot.GetUserName(), "test") assert.NotNil(t, robot.GetPolicies()) assert.Nil(t, robot.GetRoles()) } + +func TestNewRobot(t *testing.T) { + policies := []*rbac.Policy{ + {Resource: "/project/1/repository", Action: "pull"}, + {Resource: "/project/library/repository", Action: "pull"}, + {Resource: "/project/library/repository", Action: "push"}, + } + + robot := NewRobot("test", rbac.NewProjectNamespace(1, false), policies) + assert.Len(t, robot.GetPolicies(), 1) +} diff --git a/src/common/utils/encrypt.go b/src/common/utils/encrypt.go index 473880843..e68da9430 100644 --- a/src/common/utils/encrypt.go +++ b/src/common/utils/encrypt.go @@ -19,25 +19,37 @@ import ( "crypto/cipher" "crypto/rand" "crypto/sha1" + "crypto/sha256" "encoding/base64" "errors" "fmt" + "hash" "io" "strings" "golang.org/x/crypto/pbkdf2" ) -// Encrypt encrypts the content with salt -func Encrypt(content string, salt string) string { - return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, sha1.New)) -} - const ( // EncryptHeaderV1 ... 
EncryptHeaderV1 = "" + // SHA1 is the name of sha1 hash alg + SHA1 = "sha1" + // SHA256 is the name of sha256 hash alg + SHA256 = "sha256" ) +// HashAlg used to get correct alg for hash +var HashAlg = map[string]func() hash.Hash{ + SHA1: sha1.New, + SHA256: sha256.New, +} + +// Encrypt encrypts the content with salt +func Encrypt(content string, salt string, encrptAlg string) string { + return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, HashAlg[encrptAlg])) +} + // ReversibleEncrypt encrypts the str with aes/base64 func ReversibleEncrypt(str, key string) (string, error) { keyBytes := []byte(key) diff --git a/src/common/utils/ldap/ldap.go b/src/common/utils/ldap/ldap.go index 512af7618..7573960b6 100644 --- a/src/common/utils/ldap/ldap.go +++ b/src/common/utils/ldap/ldap.go @@ -444,7 +444,7 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st func createNestedGroupFilter(userDN string) string { filter := "" - filter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + userDN + "))" + filter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + goldap.EscapeFilter(userDN) + "))" return filter } diff --git a/src/common/utils/oidc/helper.go b/src/common/utils/oidc/helper.go index 30f14e209..2d59e09ed 100644 --- a/src/common/utils/oidc/helper.go +++ b/src/common/utils/oidc/helper.go @@ -207,6 +207,45 @@ func RefreshToken(ctx context.Context, token *Token) (*Token, error) { return &Token{Token: *t, IDToken: it}, nil } +// GroupsFromToken returns the list of group name in the token, the claim of the group list is set in OIDCSetting. +// It's designed not to return errors, in case of unexpected situation it will log and return empty list. 
+func GroupsFromToken(token *gooidc.IDToken) []string { + if token == nil { + log.Warning("Return empty list for nil token") + return []string{} + } + setting := provider.setting.Load().(models.OIDCSetting) + if len(setting.GroupsClaim) == 0 { + log.Warning("Group claim is not set in OIDC setting returning empty group list.") + return []string{} + } + var c map[string]interface{} + err := token.Claims(&c) + if err != nil { + log.Warningf("Failed to get claims map, error: %v", err) + return []string{} + } + return groupsFromClaim(c, setting.GroupsClaim) +} + +func groupsFromClaim(claimMap map[string]interface{}, k string) []string { + var res []string + g, ok := claimMap[k].([]interface{}) + if !ok { + log.Warningf("Unable to get groups from claims, claims: %+v, groups claim key: %s", claimMap, k) + return res + } + for _, e := range g { + s, ok := e.(string) + if !ok { + log.Warningf("Element in group list is not string: %v, list: %v", e, g) + continue + } + res = append(res, s) + } + return res +} + // Conn wraps connection info of an OIDC endpoint type Conn struct { URL string `json:"url"` diff --git a/src/common/utils/oidc/helper_test.go b/src/common/utils/oidc/helper_test.go index d706836b8..8270b44f7 100644 --- a/src/common/utils/oidc/helper_test.go +++ b/src/common/utils/oidc/helper_test.go @@ -15,6 +15,7 @@ package oidc import ( + gooidc "github.com/coreos/go-oidc" "github.com/goharbor/harbor/src/common" config2 "github.com/goharbor/harbor/src/common/config" "github.com/goharbor/harbor/src/common/models" @@ -110,3 +111,50 @@ func TestTestEndpoint(t *testing.T) { assert.Nil(t, TestEndpoint(c1)) assert.NotNil(t, TestEndpoint(c2)) } + +func TestGroupsFromToken(t *testing.T) { + res := GroupsFromToken(nil) + assert.Equal(t, []string{}, res) + res = GroupsFromToken(&gooidc.IDToken{}) + assert.Equal(t, []string{}, res) +} + +func TestGroupsFromClaim(t *testing.T) { + in := map[string]interface{}{ + "user": "user1", + "groups": []interface{}{"group1", "group2"}, + 
"groups_2": []interface{}{"group1", "group2", 2}, + } + + m := []struct { + input map[string]interface{} + key string + expect []string + }{ + { + in, + "user", + nil, + }, + { + in, + "prg", + nil, + }, + { + in, + "groups", + []string{"group1", "group2"}, + }, + { + in, + "groups_2", + []string{"group1", "group2"}, + }, + } + + for _, tc := range m { + r := groupsFromClaim(tc.input, tc.key) + assert.Equal(t, tc.expect, r) + } +} diff --git a/src/common/utils/registry/auth/apikey.go b/src/common/utils/registry/auth/apikey.go new file mode 100644 index 000000000..1dd02b16e --- /dev/null +++ b/src/common/utils/registry/auth/apikey.go @@ -0,0 +1,45 @@ +package auth + +import ( + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/common/http/modifier" +) + +type apiKeyType = string + +const ( + // APIKeyInHeader sets auth content in header + APIKeyInHeader apiKeyType = "header" + // APIKeyInQuery sets auth content in url query + APIKeyInQuery apiKeyType = "query" +) + +type apiKeyAuthorizer struct { + key, value, in apiKeyType +} + +// NewAPIKeyAuthorizer returns a apikey authorizer +func NewAPIKeyAuthorizer(key, value, in apiKeyType) modifier.Modifier { + return &apiKeyAuthorizer{ + key: key, + value: value, + in: in, + } +} + +// Modify implements modifier.Modifier +func (a *apiKeyAuthorizer) Modify(r *http.Request) error { + switch a.in { + case APIKeyInHeader: + r.Header.Set(a.key, a.value) + return nil + case APIKeyInQuery: + query := r.URL.Query() + query.Add(a.key, a.value) + r.URL.RawQuery = query.Encode() + return nil + } + return fmt.Errorf("set api key in %s is invalid", a.in) +} diff --git a/src/common/utils/registry/auth/apikey_test.go b/src/common/utils/registry/auth/apikey_test.go new file mode 100644 index 000000000..ff6ef4133 --- /dev/null +++ b/src/common/utils/registry/auth/apikey_test.go @@ -0,0 +1,50 @@ +package auth + +import ( + "net/http" + "testing" + + "github.com/goharbor/harbor/src/common/http/modifier" + 
"github.com/stretchr/testify/assert" +) + +func TestAPIKeyAuthorizer(t *testing.T) { + type suite struct { + key string + value string + in string + } + + var ( + s suite + authorizer modifier.Modifier + request *http.Request + err error + ) + + // set in header + s = suite{key: "Authorization", value: "Basic abc", in: "header"} + authorizer = NewAPIKeyAuthorizer(s.key, s.value, s.in) + request, err = http.NewRequest(http.MethodGet, "http://example.com", nil) + assert.Nil(t, err) + err = authorizer.Modify(request) + assert.Nil(t, err) + assert.Equal(t, s.value, request.Header.Get(s.key)) + + // set in query + s = suite{key: "private_token", value: "abc", in: "query"} + authorizer = NewAPIKeyAuthorizer(s.key, s.value, s.in) + request, err = http.NewRequest(http.MethodGet, "http://example.com", nil) + assert.Nil(t, err) + err = authorizer.Modify(request) + assert.Nil(t, err) + assert.Equal(t, s.value, request.URL.Query().Get(s.key)) + + // set in invalid location + s = suite{key: "", value: "", in: "invalid"} + authorizer = NewAPIKeyAuthorizer(s.key, s.value, s.in) + request, err = http.NewRequest(http.MethodGet, "http://example.com", nil) + assert.Nil(t, err) + err = authorizer.Modify(request) + assert.NotNil(t, err) +} diff --git a/src/common/utils/test/database.go b/src/common/utils/test/database.go index 560c950b7..970109b51 100644 --- a/src/common/utils/test/database.go +++ b/src/common/utils/test/database.go @@ -89,7 +89,6 @@ func updateUserInitialPassword(userID int, password string) error { if err != nil { return fmt.Errorf("Failed to update user encrypted password, userID: %d, err: %v", userID, err) } - } else { } return nil } diff --git a/src/common/utils/utils_test.go b/src/common/utils/utils_test.go index 437f16152..b81da95ed 100644 --- a/src/common/utils/utils_test.go +++ b/src/common/utils/utils_test.go @@ -17,6 +17,7 @@ package utils import ( "encoding/base64" "net/http/httptest" + "reflect" "strconv" "strings" "testing" @@ -91,12 +92,21 @@ func 
TestParseRepository(t *testing.T) { } func TestEncrypt(t *testing.T) { - content := "content" - salt := "salt" - result := Encrypt(content, salt) + tests := map[string]struct { + content string + salt string + alg string + want string + }{ + "sha1 test": {content: "content", salt: "salt", alg: SHA1, want: "dc79e76c88415c97eb089d9cc80b4ab0"}, + "sha256 test": {content: "content", salt: "salt", alg: SHA256, want: "83d3d6f3e7cacb040423adf7ced63d21"}, + } - if result != "dc79e76c88415c97eb089d9cc80b4ab0" { - t.Errorf("unexpected result: %s != %s", result, "dc79e76c88415c97eb089d9cc80b4ab0") + for name, tc := range tests { + got := Encrypt(tc.content, tc.salt, tc.alg) + if !reflect.DeepEqual(tc.want, got) { + t.Errorf("%s: expected: %v, got: %v", name, tc.want, got) + } } } diff --git a/src/core/api/admin_job.go b/src/core/api/admin_job.go index 14bdbcae6..71a486c50 100644 --- a/src/core/api/admin_job.go +++ b/src/core/api/admin_job.go @@ -62,9 +62,12 @@ func (aj *AJAPI) updateSchedule(ajr models.AdminJobReq) { // stop the scheduled job and remove it. 
if err = utils_core.GetJobServiceClient().PostAction(jobs[0].UUID, common_job.JobActionStop); err != nil { - if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound { - aj.SendInternalServerError(err) - return + _, ok := err.(*common_job.StatusBehindError) + if !ok { + if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound { + aj.SendInternalServerError(err) + return + } } } diff --git a/src/core/api/api_test.go b/src/core/api/api_test.go index f8e1ccdd0..ae1970ab1 100644 --- a/src/core/api/api_test.go +++ b/src/core/api/api_test.go @@ -180,6 +180,7 @@ func runCodeCheckingCases(t *testing.T, cases ...*codeCheckingCase) { if c.postFunc != nil { if err := c.postFunc(resp); err != nil { t.Logf("error in running post function: %v", err) + t.Error(err) } } } diff --git a/src/core/api/base.go b/src/core/api/base.go index 195f7f9c8..30b7623f6 100644 --- a/src/core/api/base.go +++ b/src/core/api/base.go @@ -18,6 +18,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/goharbor/harbor/src/common/models" "net/http" "github.com/ghodss/yaml" @@ -37,6 +38,7 @@ import ( const ( yamlFileContentType = "application/x-yaml" + userSessionKey = "user" ) // the managers/controllers used globally @@ -168,6 +170,12 @@ func (b *BaseController) WriteYamlData(object interface{}) { _, _ = w.Write(yData) } +// PopulateUserSession generates a new session ID and fill the user model in parm to the session +func (b *BaseController) PopulateUserSession(u models.User) { + b.SessionRegenerateID() + b.SetSession(userSessionKey, u) +} + // Init related objects/configurations for the API controllers func Init() error { registerHealthCheckers() diff --git a/src/core/api/harborapi_test.go b/src/core/api/harborapi_test.go index b6ed840b2..f60b500b4 100644 --- a/src/core/api/harborapi_test.go +++ b/src/core/api/harborapi_test.go @@ -177,7 +177,8 @@ func init() { beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/test", &NotificationPolicyAPI{}, 
"post:Test") beego.Router("/api/projects/:pid([0-9]+)/webhook/lasttrigger", &NotificationPolicyAPI{}, "get:ListGroupByEventType") beego.Router("/api/projects/:pid([0-9]+)/webhook/jobs/", &NotificationJobAPI{}, "get:List") - + beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules", &ImmutableTagRuleAPI{}, "get:List;post:Post") + beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules/:id([0-9]+)", &ImmutableTagRuleAPI{}) // Charts are controlled under projects chartRepositoryAPIType := &ChartRepositoryAPI{} beego.Router("/api/chartrepo/health", chartRepositoryAPIType, "get:GetHealthStatus") @@ -206,6 +207,22 @@ func init() { beego.Router("/api/internal/switchquota", &InternalAPI{}, "put:SwitchQuota") beego.Router("/api/internal/syncquota", &InternalAPI{}, "post:SyncQuota") + // Add routes for plugin scanner management + scannerAPI := &ScannerAPI{} + beego.Router("/api/scanners", scannerAPI, "post:Create;get:List") + beego.Router("/api/scanners/:uuid", scannerAPI, "get:Get;delete:Delete;put:Update;patch:SetAsDefault") + beego.Router("/api/scanners/:uuid/metadata", scannerAPI, "get:Metadata") + beego.Router("/api/scanners/ping", scannerAPI, "post:Ping") + + // Add routes for project level scanner + proScannerAPI := &ProjectScannerAPI{} + beego.Router("/api/projects/:pid([0-9]+)/scanner", proScannerAPI, "get:GetProjectScanner;put:SetProjectScanner") + + // Add routes for scan + scanAPI := &ScanAPI{} + beego.Router("/api/repositories/*/tags/:tag/scan", scanAPI, "post:Scan;get:Report") + beego.Router("/api/repositories/*/tags/:tag/scan/:uuid/log", scanAPI, "get:Log") + // syncRegistry if err := SyncRegistry(config.GlobalProjectMgr); err != nil { log.Fatalf("failed to sync repositories from registry: %v", err) diff --git a/src/core/api/immutabletagrule.go b/src/core/api/immutabletagrule.go new file mode 100644 index 000000000..09163d641 --- /dev/null +++ b/src/core/api/immutabletagrule.go @@ -0,0 +1,135 @@ +package api + +import ( + "errors" + "fmt" + "net/http" + 
"strconv" + "strings" + + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/pkg/immutabletag" + "github.com/goharbor/harbor/src/pkg/immutabletag/model" +) + +// ImmutableTagRuleAPI ... +type ImmutableTagRuleAPI struct { + BaseController + ctr immutabletag.APIController + projectID int64 + ID int64 +} + +// Prepare validates the user and projectID +func (itr *ImmutableTagRuleAPI) Prepare() { + itr.BaseController.Prepare() + if !itr.SecurityCtx.IsAuthenticated() { + itr.SendUnAuthorizedError(errors.New("Unauthorized")) + return + } + + pid, err := itr.GetInt64FromPath(":pid") + if err != nil || pid <= 0 { + text := "invalid project ID: " + if err != nil { + text += err.Error() + } else { + text += fmt.Sprintf("%d", pid) + } + itr.SendBadRequestError(errors.New(text)) + return + } + itr.projectID = pid + + ruleID, err := itr.GetInt64FromPath(":id") + if err == nil || ruleID > 0 { + itr.ID = ruleID + } + + itr.ctr = immutabletag.ImmuCtr + + if strings.EqualFold(itr.Ctx.Request.Method, "get") { + if !itr.requireAccess(rbac.ActionList) { + return + } + } else if strings.EqualFold(itr.Ctx.Request.Method, "put") { + if !itr.requireAccess(rbac.ActionUpdate) { + return + } + } else if strings.EqualFold(itr.Ctx.Request.Method, "post") { + if !itr.requireAccess(rbac.ActionCreate) { + return + } + + } else if strings.EqualFold(itr.Ctx.Request.Method, "delete") { + if !itr.requireAccess(rbac.ActionDelete) { + return + } + } +} + +func (itr *ImmutableTagRuleAPI) requireAccess(action rbac.Action) bool { + return itr.RequireProjectAccess(itr.projectID, action, rbac.ResourceImmutableTag) +} + +// List list all immutable tag rules of current project +func (itr *ImmutableTagRuleAPI) List() { + rules, err := itr.ctr.ListImmutableRules(itr.projectID) + if err != nil { + itr.SendInternalServerError(err) + return + } + itr.WriteJSONData(rules) +} + +// Post create immutable tag rule +func (itr *ImmutableTagRuleAPI) Post() { + ir := &model.Metadata{} + isValid, 
err := itr.DecodeJSONReqAndValidate(ir) + if !isValid { + itr.SendBadRequestError(err) + return + } + ir.ProjectID = itr.projectID + id, err := itr.ctr.CreateImmutableRule(ir) + if err != nil { + itr.SendInternalServerError(err) + return + } + itr.Redirect(http.StatusCreated, strconv.FormatInt(id, 10)) + +} + +// Delete delete immutable tag rule +func (itr *ImmutableTagRuleAPI) Delete() { + if itr.ID <= 0 { + itr.SendBadRequestError(fmt.Errorf("invalid immutable rule id %d", itr.ID)) + return + } + err := itr.ctr.DeleteImmutableRule(itr.ID) + if err != nil { + itr.SendInternalServerError(err) + return + } +} + +// Put update an immutable tag rule +func (itr *ImmutableTagRuleAPI) Put() { + ir := &model.Metadata{} + if err := itr.DecodeJSONReq(ir); err != nil { + itr.SendBadRequestError(err) + return + } + ir.ID = itr.ID + ir.ProjectID = itr.projectID + + if itr.ID <= 0 { + itr.SendBadRequestError(fmt.Errorf("invalid immutable rule id %d", itr.ID)) + return + } + + if err := itr.ctr.UpdateImmutableRule(itr.projectID, ir); err != nil { + itr.SendInternalServerError(err) + return + } +} diff --git a/src/core/api/immutabletagrule_test.go b/src/core/api/immutabletagrule_test.go new file mode 100644 index 000000000..8a65e9e1a --- /dev/null +++ b/src/core/api/immutabletagrule_test.go @@ -0,0 +1,335 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/goharbor/harbor/src/pkg/immutabletag" + "github.com/goharbor/harbor/src/pkg/immutabletag/model" +) + +func TestImmutableTagRuleAPI_List(t *testing.T) { + + metadata := &model.Metadata{ + ProjectID: 1, + Disabled: false, + TagSelectors: []*model.Selector{ + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*model.Selector{ + "repository": { + { + Kind: "doublestar", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + } + mgr := immutabletag.NewDefaultRuleManager() + id, err := 
mgr.CreateImmutableRule(metadata) + if err != nil { + t.Error(err) + } + defer mgr.DeleteImmutableRule(id) + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/immutabletagrules", + }, + code: http.StatusUnauthorized, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/immutabletagrules", + credential: admin, + }, + postFunc: func(responseRecorder *httptest.ResponseRecorder) error { + var rules []model.Metadata + err := json.Unmarshal([]byte(responseRecorder.Body.String()), &rules) + if err != nil { + return err + } + if len(rules) <= 0 { + return fmt.Errorf("no rules found") + } + if rules[0].TagSelectors[0].Kind != "doublestar" { + return fmt.Errorf("rule is not expected. actual: %v", responseRecorder.Body.String()) + } + return nil + }, + code: http.StatusOK, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/immutabletagrules", + credential: projAdmin, + }, + code: http.StatusOK, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/immutabletagrules", + credential: projGuest, + }, + code: http.StatusForbidden, + }, + } + runCodeCheckingCases(t, cases...) 
+ +} + +func TestImmutableTagRuleAPI_Post(t *testing.T) { + + // body := `{ + // "projectID":1, + // "priority":0, + // "template": "immutable_template", + // "action": "immutable", + // "disabled":false, + // "action":"immutable", + // "template":"immutable_template", + // "tag_selectors":[{"kind":"doublestar","decoration":"matches","pattern":"**"}], + // "scope_selectors":{"repository":[{"kind":"doublestar","decoration":"repoMatches","pattern":"**"}]} + // }` + + metadata := &model.Metadata{ + ProjectID: 1, + Disabled: false, + Priority: 0, + Template: "immutable_template", + Action: "immutable", + TagSelectors: []*model.Selector{ + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*model.Selector{ + "repository": { + { + Kind: "doublestar", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + } + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/immutabletagrules", + bodyJSON: metadata, + }, + code: http.StatusUnauthorized, + }, + // 201 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/immutabletagrules", + credential: admin, + bodyJSON: metadata, + }, + code: http.StatusCreated, + }, + // 201 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/immutabletagrules", + credential: projAdmin, + bodyJSON: metadata, + }, + code: http.StatusCreated, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/immutabletagrules", + credential: projGuest, + bodyJSON: metadata, + }, + code: http.StatusForbidden, + }, + } + runCodeCheckingCases(t, cases...) 
+ +} + +func TestImmutableTagRuleAPI_Put(t *testing.T) { + + metadata := &model.Metadata{ + ProjectID: 1, + Disabled: false, + TagSelectors: []*model.Selector{ + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*model.Selector{ + "repository": { + { + Kind: "doublestar", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + } + + metadata2 := &model.Metadata{ + ProjectID: 1, + Disabled: false, + TagSelectors: []*model.Selector{ + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "latest", + }, + }, + ScopeSelectors: map[string][]*model.Selector{ + "repository": { + { + Kind: "doublestar", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + } + mgr := immutabletag.NewDefaultRuleManager() + id, err := mgr.CreateImmutableRule(metadata) + if err != nil { + t.Error(err) + } + defer mgr.DeleteImmutableRule(id) + + url := fmt.Sprintf("/api/projects/1/immutabletagrules/%d", id) + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + bodyJSON: metadata2, + }, + code: http.StatusUnauthorized, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + credential: admin, + bodyJSON: metadata2, + }, + code: http.StatusOK, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + credential: projAdmin, + bodyJSON: metadata2, + }, + code: http.StatusOK, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + credential: projGuest, + bodyJSON: metadata2, + }, + code: http.StatusForbidden, + }, + } + runCodeCheckingCases(t, cases...) 
+} + +func TestImmutableTagRuleAPI_Delete(t *testing.T) { + metadata := &model.Metadata{ + ProjectID: 1, + Disabled: false, + TagSelectors: []*model.Selector{ + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "latest", + }, + }, + ScopeSelectors: map[string][]*model.Selector{ + "repository": { + { + Kind: "doublestar", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + } + + mgr := immutabletag.NewDefaultRuleManager() + id, err := mgr.CreateImmutableRule(metadata) + if err != nil { + t.Error(err) + } + defer mgr.DeleteImmutableRule(id) + + url := fmt.Sprintf("/api/projects/1/immutabletagrules/%d", id) + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodDelete, + url: url, + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodDelete, + url: url, + credential: projGuest, + }, + code: http.StatusForbidden, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodDelete, + url: url, + credential: projAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) 
+} diff --git a/src/core/api/notification_policy.go b/src/core/api/notification_policy.go index 597e3e4f1..324227d15 100755 --- a/src/core/api/notification_policy.go +++ b/src/core/api/notification_policy.go @@ -7,6 +7,8 @@ import ( "strconv" "time" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/common/utils" @@ -273,7 +275,8 @@ func (w *NotificationPolicyAPI) Test() { } if err := notification.PolicyMgr.Test(policy); err != nil { - w.SendBadRequestError(fmt.Errorf("notification policy %s test failed: %v", policy.Name, err)) + log.Errorf("notification policy %s test failed: %v", policy.Name, err) + w.SendBadRequestError(fmt.Errorf("notification policy %s test failed", policy.Name)) return } } diff --git a/src/core/api/oidc.go b/src/core/api/oidc.go index ed4688cf8..c19ede0a5 100644 --- a/src/core/api/oidc.go +++ b/src/core/api/oidc.go @@ -16,6 +16,7 @@ package api import ( "errors" + "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/common/utils/oidc" ) @@ -50,7 +51,7 @@ func (oa *OIDCAPI) Ping() { } if err := oidc.TestEndpoint(c); err != nil { log.Errorf("Failed to verify connection: %+v, err: %v", c, err) - oa.SendBadRequestError(err) + oa.SendBadRequestError(errors.New("failed to verify connection")) return } } diff --git a/src/core/api/pro_scanner.go b/src/core/api/pro_scanner.go new file mode 100644 index 000000000..ff0e45436 --- /dev/null +++ b/src/core/api/pro_scanner.go @@ -0,0 +1,112 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/pkg/scan/api/scanner" + "github.com/pkg/errors" +) + +// ProjectScannerAPI provides rest API for managing the project level scanner(s). +type ProjectScannerAPI struct { + // The base controller to provide common utilities + BaseController + // Scanner controller for operating scanner registrations. + c scanner.Controller + // ID of the project + pid int64 +} + +// Prepare sth. for the subsequent actions +func (sa *ProjectScannerAPI) Prepare() { + // Call super prepare method + sa.BaseController.Prepare() + + // Check access permissions + if !sa.RequireAuthenticated() { + return + } + + // Get ID of the project + pid, err := sa.GetInt64FromPath(":pid") + if err != nil { + sa.SendBadRequestError(errors.Wrap(err, "project scanner API")) + return + } + + // Check if the project exists + exists, err := sa.ProjectMgr.Exists(pid) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "project scanner API")) + return + } + + if !exists { + sa.SendNotFoundError(errors.Errorf("project with id %d", sa.pid)) + return + } + + sa.pid = pid + + sa.c = scanner.DefaultController +} + +// GetProjectScanner gets the project level scanner +func (sa *ProjectScannerAPI) GetProjectScanner() { + // Check access permissions + if !sa.RequireProjectAccess(sa.pid, rbac.ActionRead, rbac.ResourceConfiguration) { + return + } + + r, err := sa.c.GetRegistrationByProject(sa.pid) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: get 
project scanners")) + return + } + + if r != nil { + sa.Data["json"] = r + } else { + sa.Data["json"] = make(map[string]interface{}) + } + + sa.ServeJSON() +} + +// SetProjectScanner sets the project level scanner +func (sa *ProjectScannerAPI) SetProjectScanner() { + // Check access permissions + if !sa.RequireProjectAccess(sa.pid, rbac.ActionUpdate, rbac.ResourceConfiguration) { + return + } + + body := make(map[string]string) + if err := sa.DecodeJSONReq(&body); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: set project scanners")) + return + } + + uuid, ok := body["uuid"] + if !ok || len(uuid) == 0 { + sa.SendBadRequestError(errors.New("missing scanner uuid when setting project scanner")) + return + } + + if err := sa.c.SetRegistrationByProject(sa.pid, uuid); err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: set project scanners")) + return + } +} diff --git a/src/core/api/pro_scanner_test.go b/src/core/api/pro_scanner_test.go new file mode 100644 index 000000000..42d64f305 --- /dev/null +++ b/src/core/api/pro_scanner_test.go @@ -0,0 +1,95 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "fmt" + "net/http" + "testing" + + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + sc "github.com/goharbor/harbor/src/pkg/scan/api/scanner" + "github.com/stretchr/testify/suite" +) + +// ProScannerAPITestSuite is test suite for testing the project scanner API +type ProScannerAPITestSuite struct { + suite.Suite + + originC sc.Controller + mockC *MockScannerAPIController +} + +// TestProScannerAPI is the entry of ProScannerAPITestSuite +func TestProScannerAPI(t *testing.T) { + suite.Run(t, new(ProScannerAPITestSuite)) +} + +// SetupSuite prepares testing env +func (suite *ProScannerAPITestSuite) SetupTest() { + suite.originC = sc.DefaultController + m := &MockScannerAPIController{} + sc.DefaultController = m + + suite.mockC = m +} + +// TearDownTest clears test case env +func (suite *ProScannerAPITestSuite) TearDownTest() { + // Restore + sc.DefaultController = suite.originC +} + +// TestScannerAPIProjectScanner tests the API of getting/setting project level scanner +func (suite *ProScannerAPITestSuite) TestScannerAPIProjectScanner() { + suite.mockC.On("SetRegistrationByProject", int64(1), "uuid").Return(nil) + + // Set + body := make(map[string]interface{}, 1) + body["uuid"] = "uuid" + runCodeCheckingCases(suite.T(), &codeCheckingCase{ + request: &testingRequest{ + url: fmt.Sprintf("/api/projects/%d/scanner", 1), + method: http.MethodPut, + credential: projAdmin, + bodyJSON: body, + }, + code: http.StatusOK, + }) + + r := &scanner.Registration{ + ID: 1004, + UUID: "uuid", + Name: "TestScannerAPIProjectScanner", + Description: "JUST FOR TEST", + URL: "https://a.b.c", + } + suite.mockC.On("GetRegistrationByProject", int64(1)).Return(r, nil) + + // Get + rr := &scanner.Registration{} + err := handleAndParse(&testingRequest{ + url: fmt.Sprintf("/api/projects/%d/scanner", 1), + method: http.MethodGet, + credential: projAdmin, + }, rr) + 
require.NoError(suite.T(), err) + + assert.Equal(suite.T(), r.Name, rr.Name) + assert.Equal(suite.T(), r.UUID, rr.UUID) +} diff --git a/src/core/api/project.go b/src/core/api/project.go index 77285453c..40559991a 100644 --- a/src/core/api/project.go +++ b/src/core/api/project.go @@ -234,6 +234,12 @@ func (p *ProjectAPI) Post() { // Head ... func (p *ProjectAPI) Head() { + + if !p.SecurityCtx.IsAuthenticated() { + p.SendUnAuthorizedError(errors.New("Unauthorized")) + return + } + name := p.GetString("project_name") if len(name) == 0 { p.SendBadRequestError(errors.New("project_name is needed")) diff --git a/src/core/api/project_test.go b/src/core/api/project_test.go index 2c2d3d8fe..9944fbcbb 100644 --- a/src/core/api/project_test.go +++ b/src/core/api/project_test.go @@ -329,13 +329,13 @@ func TestDeleteProject(t *testing.T) { } func TestProHead(t *testing.T) { - fmt.Println("\nTest for Project HEAD API") + t.Log("\nTest for Project HEAD API") assert := assert.New(t) apiTest := newHarborAPI() // ----------------------------case 1 : Response Code=200----------------------------// - fmt.Println("case 1: response code:200") + t.Log("case 1: response code:200") httpStatusCode, err := apiTest.ProjectsHead(*admin, "library") if err != nil { t.Error("Error while search project by proName", err.Error()) @@ -345,7 +345,7 @@ func TestProHead(t *testing.T) { } // ----------------------------case 2 : Response Code=404:Project name does not exist.----------------------------// - fmt.Println("case 2: response code:404,Project name does not exist.") + t.Log("case 2: response code:404,Project name does not exist.") httpStatusCode, err = apiTest.ProjectsHead(*admin, "libra") if err != nil { t.Error("Error while search project by proName", err.Error()) @@ -354,6 +354,24 @@ func TestProHead(t *testing.T) { assert.Equal(int(404), httpStatusCode, "httpStatusCode should be 404") } + t.Log("case 3: response code:401. 
Project exist with unauthenticated user") + httpStatusCode, err = apiTest.ProjectsHead(*unknownUsr, "library") + if err != nil { + t.Error("Error while search project by proName", err.Error()) + t.Log(err) + } else { + assert.Equal(int(401), httpStatusCode, "httpStatusCode should be 404") + } + + t.Log("case 4: response code:401. Project name does not exist with unauthenticated user") + httpStatusCode, err = apiTest.ProjectsHead(*unknownUsr, "libra") + if err != nil { + t.Error("Error while search project by proName", err.Error()) + t.Log(err) + } else { + assert.Equal(int(401), httpStatusCode, "httpStatusCode should be 404") + } + fmt.Printf("\n") } diff --git a/src/core/api/projectmember.go b/src/core/api/projectmember.go index c836016f7..b704ad5c6 100644 --- a/src/core/api/projectmember.go +++ b/src/core/api/projectmember.go @@ -251,8 +251,8 @@ func AddProjectMember(projectID int64, request models.MemberReq) (int, error) { return 0, err } member.EntityID = groupID - } else if len(request.MemberGroup.GroupName) > 0 && request.MemberGroup.GroupType == common.HTTPGroupType { - ugs, err := group.QueryUserGroup(models.UserGroup{GroupName: request.MemberGroup.GroupName, GroupType: common.HTTPGroupType}) + } else if len(request.MemberGroup.GroupName) > 0 && request.MemberGroup.GroupType == common.HTTPGroupType || request.MemberGroup.GroupType == common.OIDCGroupType { + ugs, err := group.QueryUserGroup(models.UserGroup{GroupName: request.MemberGroup.GroupName, GroupType: request.MemberGroup.GroupType}) if err != nil { return 0, err } diff --git a/src/core/api/projectmember_test.go b/src/core/api/projectmember_test.go index 88e47851f..89aab304b 100644 --- a/src/core/api/projectmember_test.go +++ b/src/core/api/projectmember_test.go @@ -196,7 +196,7 @@ func TestProjectMemberAPI_Post(t *testing.T) { }, }, }, - code: http.StatusBadRequest, + code: http.StatusInternalServerError, }, { request: &testingRequest{ @@ -241,7 +241,7 @@ func TestProjectMemberAPI_Post(t *testing.T) 
{ }, }, }, - code: http.StatusBadRequest, + code: http.StatusInternalServerError, }, } runCodeCheckingCases(t, cases...) diff --git a/src/core/api/quota/registry/registry.go b/src/core/api/quota/registry/registry.go index eb4e71b3f..a68568afd 100644 --- a/src/core/api/quota/registry/registry.go +++ b/src/core/api/quota/registry/registry.go @@ -284,6 +284,9 @@ func persistPB(projects []quota.ProjectInfo) error { } _, err = dao.AddBlobsToProject(pro.ProjectID, blobsOfPro...) if err != nil { + if err == dao.ErrDupRows { + continue + } log.Error(err) return err } diff --git a/src/core/api/repository.go b/src/core/api/repository.go index 8a40d5f49..8227a5cae 100755 --- a/src/core/api/repository.go +++ b/src/core/api/repository.go @@ -25,6 +25,11 @@ import ( "strings" "time" + "github.com/goharbor/harbor/src/jobservice/logger" + + "github.com/goharbor/harbor/src/pkg/scan/api/scan" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/goharbor/harbor/src/common" @@ -40,7 +45,6 @@ import ( "github.com/goharbor/harbor/src/core/config" notifierEvt "github.com/goharbor/harbor/src/core/notifier/event" coreutils "github.com/goharbor/harbor/src/core/utils" - "github.com/goharbor/harbor/src/pkg/scan" "github.com/goharbor/harbor/src/replication" "github.com/goharbor/harbor/src/replication/event" "github.com/goharbor/harbor/src/replication/model" @@ -397,6 +401,13 @@ func (ra *RepositoryAPI) GetTag() { return } + project, err := ra.ProjectMgr.Get(projectName) + if err != nil { + ra.ParseAndHandleError(fmt.Sprintf("failed to get the project %s", + projectName), err) + return + } + client, err := coreutils.NewRepositoryClientForUI(ra.SecurityCtx.GetUsername(), repository) if err != nil { ra.SendInternalServerError(fmt.Errorf("failed to initialize the client for %s: %v", @@ -414,7 +425,7 @@ func (ra *RepositoryAPI) GetTag() { return } - result := 
assembleTagsInParallel(client, repository, []string{tag}, + result := assembleTagsInParallel(client, project.ProjectID, repository, []string{tag}, ra.SecurityCtx.GetUsername()) ra.Data["json"] = result[0] ra.ServeJSON() @@ -523,14 +534,14 @@ func (ra *RepositoryAPI) GetTags() { } projectName, _ := utils.ParseRepository(repoName) - exist, err := ra.ProjectMgr.Exists(projectName) + project, err := ra.ProjectMgr.Get(projectName) if err != nil { - ra.ParseAndHandleError(fmt.Sprintf("failed to check the existence of project %s", + ra.ParseAndHandleError(fmt.Sprintf("failed to get the project %s", projectName), err) return } - if !exist { + if project == nil { ra.SendNotFoundError(fmt.Errorf("project %s not found", projectName)) return } @@ -587,8 +598,13 @@ func (ra *RepositoryAPI) GetTags() { return } - ra.Data["json"] = assembleTagsInParallel(client, repoName, tags, - ra.SecurityCtx.GetUsername()) + ra.Data["json"] = assembleTagsInParallel( + client, + project.ProjectID, + repoName, + tags, + ra.SecurityCtx.GetUsername(), + ) ra.ServeJSON() } @@ -607,7 +623,7 @@ func simpleTags(tags []string) []*models.TagResp { // get config, signature and scan overview and assemble them into one // struct for each tag in tags -func assembleTagsInParallel(client *registry.Repository, repository string, +func assembleTagsInParallel(client *registry.Repository, projectID int64, repository string, tags []string, username string) []*models.TagResp { var err error signatures := map[string][]notarymodel.Target{} @@ -621,8 +637,15 @@ func assembleTagsInParallel(client *registry.Repository, repository string, c := make(chan *models.TagResp) for _, tag := range tags { - go assembleTag(c, client, repository, tag, config.WithClair(), - config.WithNotary(), signatures) + go assembleTag( + c, + client, + projectID, + repository, + tag, + config.WithNotary(), + signatures, + ) } result := []*models.TagResp{} var item *models.TagResp @@ -636,8 +659,8 @@ func assembleTagsInParallel(client 
*registry.Repository, repository string, return result } -func assembleTag(c chan *models.TagResp, client *registry.Repository, - repository, tag string, clairEnabled, notaryEnabled bool, +func assembleTag(c chan *models.TagResp, client *registry.Repository, projectID int64, + repository, tag string, notaryEnabled bool, signatures map[string][]notarymodel.Target) { item := &models.TagResp{} // labels @@ -659,8 +682,9 @@ func assembleTag(c chan *models.TagResp, client *registry.Repository, } // scan overview - if clairEnabled { - item.ScanOverview = getScanOverview(item.Digest, item.Name) + so := getSummary(projectID, repository, item.Digest) + if len(so) > 0 { + item.ScanOverview = so } // signature, compare both digest and tag @@ -968,73 +992,6 @@ func (ra *RepositoryAPI) GetSignatures() { ra.ServeJSON() } -// ScanImage handles request POST /api/repository/$repository/tags/$tag/scan to trigger image scan manually. -func (ra *RepositoryAPI) ScanImage() { - if !config.WithClair() { - log.Warningf("Harbor is not deployed with Clair, scan is disabled.") - ra.SendInternalServerError(errors.New("harbor is not deployed with Clair, scan is disabled")) - return - } - repoName := ra.GetString(":splat") - tag := ra.GetString(":tag") - projectName, _ := utils.ParseRepository(repoName) - exist, err := ra.ProjectMgr.Exists(projectName) - if err != nil { - ra.ParseAndHandleError(fmt.Sprintf("failed to check the existence of project %s", - projectName), err) - return - } - if !exist { - ra.SendNotFoundError(fmt.Errorf("project %s not found", projectName)) - return - } - if !ra.SecurityCtx.IsAuthenticated() { - ra.SendUnAuthorizedError(errors.New("Unauthorized")) - return - } - - if !ra.RequireProjectAccess(projectName, rbac.ActionCreate, rbac.ResourceRepositoryTagScanJob) { - return - } - err = coreutils.TriggerImageScan(repoName, tag) - if err != nil { - log.Errorf("Error while calling job service to trigger image scan: %v", err) - ra.SendInternalServerError(errors.New("Failed 
to scan image, please check log for details")) - return - } -} - -// VulnerabilityDetails fetch vulnerability info from clair, transform to Harbor's format and return to client. -func (ra *RepositoryAPI) VulnerabilityDetails() { - if !config.WithClair() { - log.Warningf("Harbor is not deployed with Clair, it's not impossible to get vulnerability details.") - ra.SendInternalServerError(errors.New("harbor is not deployed with Clair, it's not impossible to get vulnerability details")) - return - } - repository := ra.GetString(":splat") - tag := ra.GetString(":tag") - exist, digest, err := ra.checkExistence(repository, tag) - if err != nil { - ra.SendInternalServerError(fmt.Errorf("failed to check the existence of resource, error: %v", err)) - return - } - if !exist { - ra.SendNotFoundError(fmt.Errorf("resource: %s:%s not found", repository, tag)) - return - } - - projectName, _ := utils.ParseRepository(repository) - if !ra.RequireProjectAccess(projectName, rbac.ActionList, rbac.ResourceRepositoryTagVulnerability) { - return - } - res, err := scan.VulnListByDigest(digest) - if err != nil { - log.Errorf("Failed to get vulnerability list for image: %s:%s", repository, tag) - } - ra.Data["json"] = res - ra.ServeJSON() -} - func getSignatures(username, repository string) (map[string][]notarymodel.Target, error) { targets, err := notary.GetInternalTargets(config.InternalNotaryEndpoint(), username, repository) @@ -1079,33 +1036,19 @@ func (ra *RepositoryAPI) checkExistence(repository, tag string) (bool, string, e return true, digest, nil } -// will return nil when it failed to get data. The parm "tag" is for logging only. -func getScanOverview(digest string, tag string) *models.ImgScanOverview { - if len(digest) == 0 { - log.Debug("digest is nil") - return nil +func getSummary(pid int64, repository string, digest string) map[string]interface{} { + // At present, only get harbor native report as default behavior. 
+ artifact := &v1.Artifact{ + NamespaceID: pid, + Repository: repository, + Digest: digest, + MimeType: v1.MimeTypeDockerArtifact, } - data, err := dao.GetImgScanOverview(digest) + + sum, err := scan.DefaultController.GetSummary(artifact, []string{v1.MimeTypeNativeReport}) if err != nil { - log.Errorf("Failed to get scan result for tag:%s, digest: %s, error: %v", tag, digest, err) + logger.Errorf("Failed to get scan report summary with error: %s", err) } - if data == nil { - return nil - } - job, err := dao.GetScanJob(data.JobID) - if err != nil { - log.Errorf("Failed to get scan job for id:%d, error: %v", data.JobID, err) - return nil - } else if job == nil { // job does not exist - log.Errorf("The scan job with id: %d does not exist, returning nil", data.JobID) - return nil - } - data.Status = job.Status - if data.Status != models.JobFinished { - log.Debugf("Unsetting vulnerable related historical values, job status: %s", data.Status) - data.Sev = 0 - data.CompOverview = nil - data.DetailsKey = "" - } - return data + + return sum } diff --git a/src/core/api/repository_test.go b/src/core/api/repository_test.go index 7aa17a0b2..b51a38aeb 100644 --- a/src/core/api/repository_test.go +++ b/src/core/api/repository_test.go @@ -42,7 +42,7 @@ func TestGetRepos(t *testing.T) { } else { assert.Equal(int(200), code, "response code should be 200") if repos, ok := repositories.([]repoResp); ok { - assert.Equal(int(1), len(repos), "the length of repositories should be 1") + require.Equal(t, int(1), len(repos), "the length of repositories should be 1") assert.Equal(repos[0].Name, "library/hello-world", "unexpected repository name") } else { t.Error("unexpected response") diff --git a/src/core/api/robot.go b/src/core/api/robot.go index d098c4059..ded3a8d58 100644 --- a/src/core/api/robot.go +++ b/src/core/api/robot.go @@ -15,29 +15,30 @@ package api import ( - "errors" "fmt" - "net/http" - "strconv" - "time" - - "github.com/goharbor/harbor/src/common" 
"github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/rbac" - "github.com/goharbor/harbor/src/common/token" - "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/common/rbac/project" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/robot" + "github.com/goharbor/harbor/src/pkg/robot/model" + "github.com/pkg/errors" + "net/http" + "strconv" ) // RobotAPI ... type RobotAPI struct { BaseController project *models.Project - robot *models.Robot + ctr robot.Controller + robot *model.Robot } // Prepare ... func (r *RobotAPI) Prepare() { + r.BaseController.Prepare() method := r.Ctx.Request.Method @@ -67,6 +68,7 @@ func (r *RobotAPI) Prepare() { return } r.project = project + r.ctr = robot.RobotCtr if method == http.MethodPut || method == http.MethodDelete { id, err := r.GetInt64FromPath(":id") @@ -74,8 +76,7 @@ func (r *RobotAPI) Prepare() { r.SendBadRequestError(errors.New("invalid robot ID")) return } - - robot, err := dao.GetRobotByID(id) + robot, err := r.ctr.GetRobotAccount(id) if err != nil { r.SendInternalServerError(fmt.Errorf("failed to get robot %d: %v", id, err)) return @@ -100,62 +101,39 @@ func (r *RobotAPI) Post() { return } - var robotReq models.RobotReq + var robotReq model.RobotCreate isValid, err := r.DecodeJSONReqAndValidate(&robotReq) if !isValid { r.SendBadRequestError(err) return } + robotReq.Visible = true + robotReq.ProjectID = r.project.ProjectID - // Token duration in minutes - tokenDuration := time.Duration(config.RobotTokenDuration()) * time.Minute - expiresAt := time.Now().UTC().Add(tokenDuration).Unix() - createdName := common.RobotPrefix + robotReq.Name - - // first to add a robot account, and get its id. 
- robot := models.Robot{ - Name: createdName, - Description: robotReq.Description, - ProjectID: r.project.ProjectID, - ExpiresAt: expiresAt, + if err := validateRobotReq(r.project, &robotReq); err != nil { + r.SendBadRequestError(err) + return } - id, err := dao.AddRobot(&robot) + + robot, err := r.ctr.CreateRobotAccount(&robotReq) if err != nil { if err == dao.ErrDupRows { r.SendConflictError(errors.New("conflict robot account")) return } - r.SendInternalServerError(fmt.Errorf("failed to create robot account: %v", err)) + r.SendInternalServerError(errors.Wrap(err, "robot API: post")) return } - // generate the token, and return it with response data. - // token is not stored in the database. - jwtToken, err := token.New(id, r.project.ProjectID, expiresAt, robotReq.Access) - if err != nil { - r.SendInternalServerError(fmt.Errorf("failed to valid parameters to generate token for robot account, %v", err)) - err := dao.DeleteRobot(id) - if err != nil { - r.SendInternalServerError(fmt.Errorf("failed to delete the robot account: %d, %v", id, err)) - } - return - } + w := r.Ctx.ResponseWriter + w.Header().Set("Content-Type", "application/json") - rawTk, err := jwtToken.Raw() - if err != nil { - r.SendInternalServerError(fmt.Errorf("failed to sign token for robot account, %v", err)) - err := dao.DeleteRobot(id) - if err != nil { - r.SendInternalServerError(fmt.Errorf("failed to delete the robot account: %d, %v", id, err)) - } - return - } - - robotRep := models.RobotRep{ + robotRep := model.RobotRep{ Name: robot.Name, - Token: rawTk, + Token: robot.Token, } - r.Redirect(http.StatusCreated, strconv.FormatInt(id, 10)) + + r.Redirect(http.StatusCreated, strconv.FormatInt(robot.ID, 10)) r.Data["json"] = robotRep r.ServeJSON() } @@ -166,28 +144,25 @@ func (r *RobotAPI) List() { return } - query := models.RobotQuery{ - ProjectID: r.project.ProjectID, + keywords := make(map[string]interface{}) + keywords["ProjectID"] = r.project.ProjectID + keywords["Visible"] = true + query := 
&q.Query{ + Keywords: keywords, } - - count, err := dao.CountRobot(&query) + robots, err := r.ctr.ListRobotAccount(query) if err != nil { - r.SendInternalServerError(fmt.Errorf("failed to list robots on project: %d, %v", r.project.ProjectID, err)) + r.SendInternalServerError(errors.Wrap(err, "robot API: list")) return } - query.Page, query.Size, err = r.GetPaginationParams() + count := len(robots) + page, size, err := r.GetPaginationParams() if err != nil { r.SendBadRequestError(err) return } - robots, err := dao.ListRobots(&query) - if err != nil { - r.SendInternalServerError(fmt.Errorf("failed to get robots %v", err)) - return - } - - r.SetPaginationHeader(count, query.Page, query.Size) + r.SetPaginationHeader(int64(count), page, size) r.Data["json"] = robots r.ServeJSON() } @@ -204,13 +179,17 @@ func (r *RobotAPI) Get() { return } - robot, err := dao.GetRobotByID(id) + robot, err := r.ctr.GetRobotAccount(id) if err != nil { - r.SendInternalServerError(fmt.Errorf("failed to get robot %d: %v", id, err)) + r.SendInternalServerError(errors.Wrap(err, "robot API: get robot")) return } if robot == nil { - r.SendNotFoundError(fmt.Errorf("robot %d not found", id)) + r.SendNotFoundError(fmt.Errorf("robot API: robot %d not found", id)) + return + } + if !robot.Visible { + r.SendForbiddenError(fmt.Errorf("robot API: robot %d is invisible", id)) return } @@ -224,7 +203,7 @@ func (r *RobotAPI) Put() { return } - var robotReq models.RobotReq + var robotReq model.RobotCreate if err := r.DecodeJSONReq(&robotReq); err != nil { r.SendBadRequestError(err) return @@ -232,8 +211,8 @@ func (r *RobotAPI) Put() { r.robot.Disabled = robotReq.Disabled - if err := dao.UpdateRobot(r.robot); err != nil { - r.SendInternalServerError(fmt.Errorf("failed to update robot %d: %v", r.robot.ID, err)) + if err := r.ctr.UpdateRobotAccount(r.robot); err != nil { + r.SendInternalServerError(errors.Wrap(err, "robot API: update")) return } @@ -245,8 +224,30 @@ func (r *RobotAPI) Delete() { return } - if 
err := dao.DeleteRobot(r.robot.ID); err != nil { - r.SendInternalServerError(fmt.Errorf("failed to delete robot %d: %v", r.robot.ID, err)) + if err := r.ctr.DeleteRobotAccount(r.robot.ID); err != nil { + r.SendInternalServerError(errors.Wrap(err, "robot API: delete")) return } } + +func validateRobotReq(p *models.Project, robotReq *model.RobotCreate) error { + if len(robotReq.Access) == 0 { + return errors.New("access required") + } + + namespace, _ := rbac.Resource(fmt.Sprintf("/project/%d", p.ProjectID)).GetNamespace() + policies := project.GetAllPolicies(namespace) + + mp := map[string]bool{} + for _, policy := range policies { + mp[policy.String()] = true + } + + for _, policy := range robotReq.Access { + if !mp[policy.String()] { + return fmt.Errorf("%s action of %s resource not exist in project %s", policy.Action, policy.Resource, p.Name) + } + } + + return nil +} diff --git a/src/core/api/robot_test.go b/src/core/api/robot_test.go index baecb67b5..c6644ca13 100644 --- a/src/core/api/robot_test.go +++ b/src/core/api/robot_test.go @@ -16,10 +16,11 @@ package api import ( "fmt" - "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/rbac" "net/http" "testing" + + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/pkg/robot/model" ) var ( @@ -28,9 +29,10 @@ var ( ) func TestRobotAPIPost(t *testing.T) { + res := rbac.Resource("/project/1") rbacPolicy := &rbac.Policy{ - Resource: "/project/libray/repository", + Resource: res.Subresource(rbac.ResourceRepository), Action: "pull", } policies := []*rbac.Policy{} @@ -51,7 +53,7 @@ func TestRobotAPIPost(t *testing.T) { request: &testingRequest{ method: http.MethodPost, url: robotPath, - bodyJSON: &models.RobotReq{}, + bodyJSON: &model.RobotCreate{}, credential: nonSysAdmin, }, code: http.StatusForbidden, @@ -61,7 +63,7 @@ func TestRobotAPIPost(t *testing.T) { request: &testingRequest{ method: http.MethodPost, url: robotPath, - bodyJSON: &models.RobotReq{ + 
bodyJSON: &model.RobotCreate{ Name: "test", Description: "test desc", Access: policies, @@ -75,7 +77,7 @@ func TestRobotAPIPost(t *testing.T) { request: &testingRequest{ method: http.MethodPost, url: robotPath, - bodyJSON: &models.RobotReq{ + bodyJSON: &model.RobotCreate{ Name: "testIllgel#", Description: "test desc", }, @@ -83,12 +85,57 @@ func TestRobotAPIPost(t *testing.T) { }, code: http.StatusBadRequest, }, + { + request: &testingRequest{ + method: http.MethodPost, + url: robotPath, + bodyJSON: &model.RobotCreate{ + Name: "test", + Description: "resource not exist", + Access: []*rbac.Policy{ + {Resource: res.Subresource("foo"), Action: rbac.ActionCreate}, + }, + }, + credential: projAdmin4Robot, + }, + code: http.StatusBadRequest, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: robotPath, + bodyJSON: &model.RobotCreate{ + Name: "test", + Description: "action not exist", + Access: []*rbac.Policy{ + {Resource: res.Subresource(rbac.ResourceRepository), Action: "foo"}, + }, + }, + credential: projAdmin4Robot, + }, + code: http.StatusBadRequest, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: robotPath, + bodyJSON: &model.RobotCreate{ + Name: "test", + Description: "policy not exit", + Access: []*rbac.Policy{ + {Resource: res.Subresource(rbac.ResourceMember), Action: rbac.ActionPush}, + }, + }, + credential: projAdmin4Robot, + }, + code: http.StatusBadRequest, + }, // 403 -- developer { request: &testingRequest{ method: http.MethodPost, url: robotPath, - bodyJSON: &models.RobotReq{ + bodyJSON: &model.RobotCreate{ Name: "test2", Description: "test2 desc", }, @@ -102,7 +149,7 @@ func TestRobotAPIPost(t *testing.T) { request: &testingRequest{ method: http.MethodPost, url: robotPath, - bodyJSON: &models.RobotReq{ + bodyJSON: &model.RobotCreate{ Name: "test", Description: "test desc", Access: policies, @@ -259,7 +306,7 @@ func TestRobotAPIPut(t *testing.T) { request: &testingRequest{ method: http.MethodPut, url: 
fmt.Sprintf("%s/%d", robotPath, 1), - bodyJSON: &models.Robot{ + bodyJSON: &model.Robot{ Disabled: true, }, credential: projAdmin4Robot, diff --git a/src/core/api/scan.go b/src/core/api/scan.go new file mode 100644 index 000000000..aac29c22f --- /dev/null +++ b/src/core/api/scan.go @@ -0,0 +1,192 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "net/http" + "strconv" + + "github.com/goharbor/harbor/src/pkg/scan/report" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/common/utils" + coreutils "github.com/goharbor/harbor/src/core/utils" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/scan/api/scan" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/pkg/errors" +) + +var digestFunc digestGetter = getDigest + +// ScanAPI handles the scan related actions +type ScanAPI struct { + BaseController + + // Target artifact + artifact *v1.Artifact + // Project reference + pro *models.Project +} + +// Prepare sth. 
for the subsequent actions +func (sa *ScanAPI) Prepare() { + // Call super prepare method + sa.BaseController.Prepare() + + // Parse parameters + repoName := sa.GetString(":splat") + tag := sa.GetString(":tag") + projectName, _ := utils.ParseRepository(repoName) + + pro, err := sa.ProjectMgr.Get(projectName) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scan API: prepare")) + return + } + if pro == nil { + sa.SendNotFoundError(errors.Errorf("project %s not found", projectName)) + return + } + sa.pro = pro + + // Check authentication + if !sa.RequireAuthenticated() { + return + } + + // Assemble artifact object + digest, err := digestFunc(repoName, tag, sa.SecurityCtx.GetUsername()) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scan API: prepare")) + return + } + + sa.artifact = &v1.Artifact{ + NamespaceID: pro.ProjectID, + Repository: repoName, + Tag: tag, + Digest: digest, + MimeType: v1.MimeTypeDockerArtifact, + } + + logger.Debugf("Scan API receives artifact: %#v", sa.artifact) +} + +// Scan artifact +func (sa *ScanAPI) Scan() { + // Check access permissions + if !sa.RequireProjectAccess(sa.pro.ProjectID, rbac.ActionCreate, rbac.ResourceScan) { + return + } + + if err := scan.DefaultController.Scan(sa.artifact); err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scan API: scan")) + return + } + + sa.Ctx.ResponseWriter.WriteHeader(http.StatusAccepted) +} + +// Report returns the required reports with the given mime types. +func (sa *ScanAPI) Report() { + // Check access permissions + if !sa.RequireProjectAccess(sa.pro.ProjectID, rbac.ActionRead, rbac.ResourceScan) { + return + } + + // Extract mime types + producesMimes := make([]string, 0) + if hl, ok := sa.Ctx.Request.Header[v1.HTTPAcceptHeader]; ok && len(hl) > 0 { + producesMimes = append(producesMimes, hl...) 
+ } + + // Get the reports + reports, err := scan.DefaultController.GetReport(sa.artifact, producesMimes) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scan API: get report")) + return + } + + vulItems := make(map[string]interface{}) + for _, rp := range reports { + // Resolve scan report data only when it is ready + if len(rp.Report) == 0 { + continue + } + + vrp, err := report.ResolveData(rp.MimeType, []byte(rp.Report)) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scan API: get report")) + return + } + + vulItems[rp.MimeType] = vrp + } + + sa.Data["json"] = vulItems + sa.ServeJSON() +} + +// Log returns the log stream +func (sa *ScanAPI) Log() { + // Check access permissions + if !sa.RequireProjectAccess(sa.pro.ProjectID, rbac.ActionRead, rbac.ResourceScan) { + return + } + + uuid := sa.GetString(":uuid") + bytes, err := scan.DefaultController.GetScanLog(uuid) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scan API: log")) + return + } + + if bytes == nil { + // Not found + sa.SendNotFoundError(errors.Errorf("report with uuid %s does not exist", uuid)) + return + } + + sa.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(len(bytes))) + sa.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain") + _, err = sa.Ctx.ResponseWriter.Write(bytes) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scan API: log")) + } +} + +// digestGetter is a function template for getting digest. +// TODO: This can be removed if the registry access interface is ready. 
+type digestGetter func(repo, tag string, username string) (string, error)
+
+func getDigest(repo, tag string, username string) (string, error) {
+	client, err := coreutils.NewRepositoryClientForUI(username, repo)
+	if err != nil {
+		return "", err
+	}
+
+	digest, exists, err := client.ManifestExist(tag)
+	if err != nil {
+		return "", err
+	}
+
+	if !exists {
+		return "", errors.Errorf("tag %s does not exist", tag)
+	}
+
+	return digest, nil
+}
diff --git a/src/core/api/scan_job.go b/src/core/api/scan_job.go
deleted file mode 100644
index 7cc38d61e..000000000
--- a/src/core/api/scan_job.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018 Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package api
-
-import (
-	"github.com/goharbor/harbor/src/common/dao"
-	"github.com/goharbor/harbor/src/common/rbac"
-	"github.com/goharbor/harbor/src/common/utils/log"
-	"github.com/goharbor/harbor/src/core/utils"
-
-	"errors"
-	"fmt"
-	"net/http"
-	"strconv"
-	"strings"
-)
-
-// ScanJobAPI handles request to /api/scanJobs/:id/log
-type ScanJobAPI struct {
-	BaseController
-	jobID       int64
-	projectName string
-	jobUUID     string
-}
-
-// Prepare validates that whether user has read permission to the project of the repo the scan job scanned.
-func (sj *ScanJobAPI) Prepare() { - sj.BaseController.Prepare() - if !sj.SecurityCtx.IsAuthenticated() { - sj.SendUnAuthorizedError(errors.New("UnAuthorized")) - return - } - id, err := sj.GetInt64FromPath(":id") - if err != nil { - sj.SendBadRequestError(errors.New("invalid ID")) - return - } - sj.jobID = id - - data, err := dao.GetScanJob(id) - if err != nil { - log.Errorf("Failed to load job data for job: %d, error: %v", id, err) - sj.SendInternalServerError(errors.New("Failed to get Job data")) - return - } - - projectName := strings.SplitN(data.Repository, "/", 2)[0] - if !sj.RequireProjectAccess(projectName, rbac.ActionRead, rbac.ResourceRepositoryTagScanJob) { - log.Errorf("User does not have read permission for project: %s", projectName) - return - } - sj.projectName = projectName - sj.jobUUID = data.UUID -} - -// GetLog ... -func (sj *ScanJobAPI) GetLog() { - logBytes, err := utils.GetJobServiceClient().GetJobLog(sj.jobUUID) - if err != nil { - sj.ParseAndHandleError(fmt.Sprintf("Failed to get job logs, uuid: %s, error: %v", sj.jobUUID, err), err) - return - } - sj.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(len(logBytes))) - sj.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain") - _, err = sj.Ctx.ResponseWriter.Write(logBytes) - if err != nil { - sj.SendInternalServerError(fmt.Errorf("Failed to write job logs, uuid: %s, error: %v", sj.jobUUID, err)) - } - -} diff --git a/src/core/api/scan_test.go b/src/core/api/scan_test.go new file mode 100644 index 000000000..2e80a35d0 --- /dev/null +++ b/src/core/api/scan_test.go @@ -0,0 +1,214 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/scan/api/scan" + dscan "github.com/goharbor/harbor/src/pkg/scan/dao/scan" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +var scanBaseURL = "/api/repositories/library/hello-world/tags/latest/scan" + +// ScanAPITestSuite is the test suite for scan API. +type ScanAPITestSuite struct { + suite.Suite + + originalC scan.Controller + c *MockScanAPIController + + originalDigestGetter digestGetter + + artifact *v1.Artifact +} + +// TestScanAPI is the entry point of ScanAPITestSuite. +func TestScanAPI(t *testing.T) { + suite.Run(t, new(ScanAPITestSuite)) +} + +// SetupSuite prepares test env for suite. +func (suite *ScanAPITestSuite) SetupSuite() { + suite.artifact = &v1.Artifact{ + NamespaceID: (int64)(1), + Repository: "library/hello-world", + Tag: "latest", + Digest: "digest-code-001", + MimeType: v1.MimeTypeDockerArtifact, + } +} + +// SetupTest prepares test env for test cases. +func (suite *ScanAPITestSuite) SetupTest() { + suite.originalC = scan.DefaultController + suite.c = &MockScanAPIController{} + + scan.DefaultController = suite.c + + suite.originalDigestGetter = digestFunc + digestFunc = func(repo, tag string, username string) (s string, e error) { + return "digest-code-001", nil + } +} + +// TearDownTest ... 
+func (suite *ScanAPITestSuite) TearDownTest() { + scan.DefaultController = suite.originalC + digestFunc = suite.originalDigestGetter +} + +// TestScanAPIBase ... +func (suite *ScanAPITestSuite) TestScanAPIBase() { + suite.c.On("Scan", &v1.Artifact{}).Return(nil) + // Including general cases + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + url: scanBaseURL, + method: http.MethodGet, + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + url: scanBaseURL, + method: http.MethodPost, + credential: projGuest, + }, + code: http.StatusForbidden, + }, + } + + runCodeCheckingCases(suite.T(), cases...) +} + +// TestScanAPIScan ... +func (suite *ScanAPITestSuite) TestScanAPIScan() { + suite.c.On("Scan", suite.artifact).Return(nil) + + // Including general cases + cases := []*codeCheckingCase{ + // 202 + { + request: &testingRequest{ + url: scanBaseURL, + method: http.MethodPost, + credential: projDeveloper, + }, + code: http.StatusAccepted, + }, + } + + runCodeCheckingCases(suite.T(), cases...) +} + +// TestScanAPIReport ... +func (suite *ScanAPITestSuite) TestScanAPIReport() { + suite.c.On("GetReport", suite.artifact, []string{v1.MimeTypeNativeReport}).Return([]*dscan.Report{}, nil) + + vulItems := make(map[string]interface{}) + + header := make(http.Header) + header.Add("Accept", v1.MimeTypeNativeReport) + err := handleAndParse( + &testingRequest{ + url: scanBaseURL, + method: http.MethodGet, + credential: projDeveloper, + header: header, + }, &vulItems) + require.NoError(suite.T(), err) +} + +// TestScanAPILog ... 
+func (suite *ScanAPITestSuite) TestScanAPILog() { + suite.c.On("GetScanLog", "the-uuid-001").Return([]byte(`{"log": "this is my log"}`), nil) + + logs := make(map[string]string) + err := handleAndParse( + &testingRequest{ + url: fmt.Sprintf("%s/%s", scanBaseURL, "the-uuid-001/log"), + method: http.MethodGet, + credential: projDeveloper, + }, &logs) + require.NoError(suite.T(), err) + assert.Condition(suite.T(), func() (success bool) { + success = len(logs) > 0 + return + }) +} + +// Mock things + +// MockScanAPIController ... +type MockScanAPIController struct { + mock.Mock +} + +// Scan ... +func (msc *MockScanAPIController) Scan(artifact *v1.Artifact) error { + args := msc.Called(artifact) + + return args.Error(0) +} + +func (msc *MockScanAPIController) GetReport(artifact *v1.Artifact, mimeTypes []string) ([]*dscan.Report, error) { + args := msc.Called(artifact, mimeTypes) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]*dscan.Report), args.Error(1) +} + +func (msc *MockScanAPIController) GetSummary(artifact *v1.Artifact, mimeTypes []string) (map[string]interface{}, error) { + args := msc.Called(artifact, mimeTypes) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(map[string]interface{}), args.Error(1) +} + +func (msc *MockScanAPIController) GetScanLog(uuid string) ([]byte, error) { + args := msc.Called(uuid) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]byte), args.Error(1) +} + +func (msc *MockScanAPIController) HandleJobHooks(trackID string, change *job.StatusChange) error { + args := msc.Called(trackID, change) + + return args.Error(0) +} diff --git a/src/core/api/scanners.go b/src/core/api/scanners.go new file mode 100644 index 000000000..53321f64a --- /dev/null +++ b/src/core/api/scanners.go @@ -0,0 +1,322 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/pkg/q" + s "github.com/goharbor/harbor/src/pkg/scan/api/scanner" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/pkg/errors" +) + +// ScannerAPI provides the API for managing the plugin scanners +type ScannerAPI struct { + // The base controller to provide common utilities + BaseController + + // Controller for the plug scanners + c s.Controller +} + +// Prepare sth. for the subsequent actions +func (sa *ScannerAPI) Prepare() { + // Call super prepare method + sa.BaseController.Prepare() + + // Check access permissions + if !sa.SecurityCtx.IsAuthenticated() { + sa.SendUnAuthorizedError(errors.New("UnAuthorized")) + return + } + + if !sa.SecurityCtx.IsSysAdmin() { + sa.SendForbiddenError(errors.New(sa.SecurityCtx.GetUsername())) + return + } + + // Use the default controller + sa.c = s.DefaultController +} + +// Get the specified scanner +func (sa *ScannerAPI) Get() { + if r := sa.get(); r != nil { + // Response to the client + sa.Data["json"] = r + sa.ServeJSON() + } +} + +// Metadata returns the metadata of the given scanner. 
+func (sa *ScannerAPI) Metadata() { + uuid := sa.GetStringFromPath(":uuid") + + meta, err := sa.c.GetMetadata(uuid) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: get metadata")) + return + } + + // Response to the client + sa.Data["json"] = meta + sa.ServeJSON() +} + +// List all the scanners +func (sa *ScannerAPI) List() { + p, pz, err := sa.GetPaginationParams() + if err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: list all")) + return + } + + query := &q.Query{ + PageSize: pz, + PageNumber: p, + } + + // Get query key words + kws := make(map[string]interface{}) + properties := []string{"name", "description", "url", "ex_name", "ex_url"} + for _, k := range properties { + kw := sa.GetString(k) + if len(kw) > 0 { + kws[k] = kw + } + } + + if len(kws) > 0 { + query.Keywords = kws + } + + all, err := sa.c.ListRegistrations(query) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: list all")) + return + } + + // Response to the client + sa.Data["json"] = all + sa.ServeJSON() +} + +// Create a new scanner +func (sa *ScannerAPI) Create() { + r := &scanner.Registration{} + + if err := sa.DecodeJSONReq(r); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: create")) + return + } + + if err := r.Validate(false); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: create")) + return + } + + // Explicitly check if conflict + if !sa.checkDuplicated("name", r.Name) || + !sa.checkDuplicated("url", r.URL) { + return + } + + // All newly created should be non default one except the 1st one + r.IsDefault = false + + uuid, err := sa.c.CreateRegistration(r) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: create")) + return + } + + location := fmt.Sprintf("%s/%s", sa.Ctx.Request.RequestURI, uuid) + sa.Ctx.ResponseWriter.Header().Add("Location", location) + + resp := make(map[string]string, 1) + resp["uuid"] = uuid + + // Response to the 
client + sa.Ctx.ResponseWriter.WriteHeader(http.StatusCreated) + sa.Data["json"] = resp + sa.ServeJSON() +} + +// Update a scanner +func (sa *ScannerAPI) Update() { + r := sa.get() + if r == nil { + // meet error + return + } + + // full dose updated + rr := &scanner.Registration{} + if err := sa.DecodeJSONReq(rr); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: update")) + return + } + + if err := r.Validate(true); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: update")) + return + } + + // Name changed? + if r.Name != rr.Name { + if !sa.checkDuplicated("name", rr.Name) { + return + } + } + + // URL changed? + if r.URL != rr.URL { + if !sa.checkDuplicated("url", rr.URL) { + return + } + } + + getChanges(r, rr) + + if err := sa.c.UpdateRegistration(r); err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: update")) + return + } + + location := fmt.Sprintf("%s/%s", sa.Ctx.Request.RequestURI, r.UUID) + sa.Ctx.ResponseWriter.Header().Add("Location", location) + + // Response to the client + sa.Data["json"] = r + sa.ServeJSON() +} + +// Delete the scanner +func (sa *ScannerAPI) Delete() { + uid := sa.GetStringFromPath(":uuid") + + deleted, err := sa.c.DeleteRegistration(uid) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: delete")) + return + } + + if deleted == nil { + // Not found + sa.SendNotFoundError(errors.Errorf("scanner registration: %s", uid)) + return + } + + sa.Data["json"] = deleted + sa.ServeJSON() +} + +// SetAsDefault sets the given registration as default one +func (sa *ScannerAPI) SetAsDefault() { + uid := sa.GetStringFromPath(":uuid") + + m := make(map[string]interface{}) + if err := sa.DecodeJSONReq(&m); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: set as default")) + return + } + + if v, ok := m["is_default"]; ok { + if isDefault, y := v.(bool); y && isDefault { + if err := sa.c.SetDefaultRegistration(uid); err != nil { + 
sa.SendInternalServerError(errors.Wrap(err, "scanner API: set as default")) + } + + return + } + } + + // Not supported + sa.SendForbiddenError(errors.Errorf("not supported: %#v", m)) +} + +// Ping the registration. +func (sa *ScannerAPI) Ping() { + r := &scanner.Registration{} + + if err := sa.DecodeJSONReq(r); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: ping")) + return + } + + if err := r.Validate(false); err != nil { + sa.SendBadRequestError(errors.Wrap(err, "scanner API: ping")) + return + } + + if _, err := sa.c.Ping(r); err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: ping")) + return + } +} + +// get the specified scanner +func (sa *ScannerAPI) get() *scanner.Registration { + uid := sa.GetStringFromPath(":uuid") + + r, err := sa.c.GetRegistration(uid) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: get")) + return nil + } + + if r == nil { + // NOT found + sa.SendNotFoundError(errors.Errorf("scanner: %s", uid)) + return nil + } + + return r +} + +func (sa *ScannerAPI) checkDuplicated(property, value string) bool { + // Explicitly check if conflict + kw := make(map[string]interface{}) + kw[property] = value + + query := &q.Query{ + Keywords: kw, + } + + l, err := sa.c.ListRegistrations(query) + if err != nil { + sa.SendInternalServerError(errors.Wrap(err, "scanner API: check existence")) + return false + } + + if len(l) > 0 { + sa.SendConflictError(errors.Errorf("duplicated entries: %s:%s", property, value)) + return false + } + + return true +} + +func getChanges(e *scanner.Registration, eChange *scanner.Registration) { + e.Name = eChange.Name + e.Description = eChange.Description + e.URL = eChange.URL + e.Auth = eChange.Auth + e.AccessCredential = eChange.AccessCredential + e.Disabled = eChange.Disabled + e.SkipCertVerify = eChange.SkipCertVerify +} diff --git a/src/core/api/scanners_test.go b/src/core/api/scanners_test.go new file mode 100644 index 000000000..598ae7459 --- 
/dev/null
+++ b/src/core/api/scanners_test.go
@@ -0,0 +1,372 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
+
+	"github.com/goharbor/harbor/src/pkg/q"
+	sc "github.com/goharbor/harbor/src/pkg/scan/api/scanner"
+	"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+const (
+	rootRoute = "/api/scanners"
+)
+
+// ScannerAPITestSuite is test suite for testing the scanner API
+type ScannerAPITestSuite struct {
+	suite.Suite
+
+	originC sc.Controller
+	mockC   *MockScannerAPIController
+}
+
+// TestScannerAPI is the entry of ScannerAPITestSuite
+func TestScannerAPI(t *testing.T) {
+	suite.Run(t, new(ScannerAPITestSuite))
+}
+
+// SetupTest prepares testing env for each test case
+func (suite *ScannerAPITestSuite) SetupTest() {
+	suite.originC = sc.DefaultController
+	m := &MockScannerAPIController{}
+	sc.DefaultController = m
+
+	suite.mockC = m
+}
+
+// TearDownTest clears test case env
+func (suite *ScannerAPITestSuite) TearDownTest() {
+	// Restore
+	sc.DefaultController = suite.originC
+}
+
+// TestScannerAPIBase tests the general auth/validation cases (401, 403, 400)
+func (suite *ScannerAPITestSuite) TestScannerAPIBase() {
+	// Including general cases
+	cases :=
[]*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + url: rootRoute, + method: http.MethodPost, + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + url: rootRoute, + method: http.MethodPost, + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 400 + { + request: &testingRequest{ + url: rootRoute, + method: http.MethodPost, + credential: sysAdmin, + bodyJSON: &scanner.Registration{ + URL: "http://a.b.c", + }, + }, + code: http.StatusBadRequest, + }, + } + + runCodeCheckingCases(suite.T(), cases...) +} + +// TestScannerAPIGet tests api get +func (suite *ScannerAPITestSuite) TestScannerAPIGet() { + res := &scanner.Registration{ + ID: 1000, + UUID: "uuid", + Name: "TestScannerAPIGet", + Description: "JUST FOR TEST", + URL: "https://a.b.c", + } + suite.mockC.On("GetRegistration", "uuid").Return(res, nil) + + // Get + rr := &scanner.Registration{} + err := handleAndParse(&testingRequest{ + url: fmt.Sprintf("%s/%s", rootRoute, "uuid"), + method: http.MethodGet, + credential: sysAdmin, + }, rr) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), rr) + assert.Equal(suite.T(), res.Name, rr.Name) + assert.Equal(suite.T(), res.UUID, rr.UUID) +} + +// TestScannerAPICreate tests create. 
+func (suite *ScannerAPITestSuite) TestScannerAPICreate() { + r := &scanner.Registration{ + Name: "TestScannerAPICreate", + Description: "JUST FOR TEST", + URL: "https://a.b.c", + } + + suite.mockQuery(r) + suite.mockC.On("CreateRegistration", r).Return("uuid", nil) + + // Create + res := make(map[string]string, 1) + err := handleAndParse( + &testingRequest{ + url: rootRoute, + method: http.MethodPost, + credential: sysAdmin, + bodyJSON: r, + }, &res) + require.NoError(suite.T(), err) + require.Condition(suite.T(), func() (success bool) { + success = res["uuid"] == "uuid" + return + }) +} + +// TestScannerAPIList tests list +func (suite *ScannerAPITestSuite) TestScannerAPIList() { + query := &q.Query{ + PageNumber: 1, + PageSize: 500, + } + ll := []*scanner.Registration{ + { + ID: 1001, + UUID: "uuid", + Name: "TestScannerAPIList", + Description: "JUST FOR TEST", + URL: "https://a.b.c", + }} + suite.mockC.On("ListRegistrations", query).Return(ll, nil) + + // List + l := make([]*scanner.Registration, 0) + err := handleAndParse(&testingRequest{ + url: rootRoute, + method: http.MethodGet, + credential: sysAdmin, + }, &l) + require.NoError(suite.T(), err) + assert.Condition(suite.T(), func() (success bool) { + success = len(l) > 0 && l[0].Name == ll[0].Name + return + }) +} + +// TestScannerAPIUpdate tests the update API +func (suite *ScannerAPITestSuite) TestScannerAPIUpdate() { + before := &scanner.Registration{ + ID: 1002, + UUID: "uuid", + Name: "TestScannerAPIUpdate_before", + Description: "JUST FOR TEST", + URL: "https://a.b.c", + } + + updated := &scanner.Registration{ + ID: 1002, + UUID: "uuid", + Name: "TestScannerAPIUpdate", + Description: "JUST FOR TEST", + URL: "https://a.b.c", + } + + suite.mockQuery(updated) + suite.mockC.On("UpdateRegistration", updated).Return(nil) + suite.mockC.On("GetRegistration", "uuid").Return(before, nil) + + rr := &scanner.Registration{} + err := handleAndParse(&testingRequest{ + url: fmt.Sprintf("%s/%s", rootRoute, "uuid"), + 
method: http.MethodPut, + credential: sysAdmin, + bodyJSON: updated, + }, rr) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), rr) + + assert.Equal(suite.T(), updated.Name, rr.Name) + assert.Equal(suite.T(), updated.UUID, rr.UUID) +} + +// +func (suite *ScannerAPITestSuite) TestScannerAPIDelete() { + r := &scanner.Registration{ + ID: 1003, + UUID: "uuid", + Name: "TestScannerAPIDelete", + Description: "JUST FOR TEST", + URL: "https://a.b.c", + } + + suite.mockC.On("DeleteRegistration", "uuid").Return(r, nil) + + deleted := &scanner.Registration{} + err := handleAndParse(&testingRequest{ + url: fmt.Sprintf("%s/%s", rootRoute, "uuid"), + method: http.MethodDelete, + credential: sysAdmin, + }, deleted) + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), r.UUID, deleted.UUID) + assert.Equal(suite.T(), r.Name, deleted.Name) +} + +// TestScannerAPISetDefault tests the set default +func (suite *ScannerAPITestSuite) TestScannerAPISetDefault() { + suite.mockC.On("SetDefaultRegistration", "uuid").Return(nil) + + body := make(map[string]interface{}, 1) + body["is_default"] = true + runCodeCheckingCases(suite.T(), &codeCheckingCase{ + request: &testingRequest{ + url: fmt.Sprintf("%s/%s", rootRoute, "uuid"), + method: http.MethodPatch, + credential: sysAdmin, + bodyJSON: body, + }, + code: http.StatusOK, + }) +} + +func (suite *ScannerAPITestSuite) mockQuery(r *scanner.Registration) { + kw := make(map[string]interface{}, 1) + kw["name"] = r.Name + query := &q.Query{ + Keywords: kw, + } + emptyL := make([]*scanner.Registration, 0) + suite.mockC.On("ListRegistrations", query).Return(emptyL, nil) + + kw2 := make(map[string]interface{}, 1) + kw2["url"] = r.URL + query2 := &q.Query{ + Keywords: kw2, + } + suite.mockC.On("ListRegistrations", query2).Return(emptyL, nil) +} + +// MockScannerAPIController is mock of scanner API controller +type MockScannerAPIController struct { + mock.Mock +} + +// ListRegistrations ... 
+func (m *MockScannerAPIController) ListRegistrations(query *q.Query) ([]*scanner.Registration, error) { + args := m.Called(query) + return args.Get(0).([]*scanner.Registration), args.Error(1) +} + +// CreateRegistration ... +func (m *MockScannerAPIController) CreateRegistration(registration *scanner.Registration) (string, error) { + args := m.Called(registration) + return args.String(0), args.Error(1) +} + +// GetRegistration ... +func (m *MockScannerAPIController) GetRegistration(registrationUUID string) (*scanner.Registration, error) { + args := m.Called(registrationUUID) + s := args.Get(0) + if s == nil { + return nil, args.Error(1) + } + + return s.(*scanner.Registration), args.Error(1) +} + +// RegistrationExists ... +func (m *MockScannerAPIController) RegistrationExists(registrationUUID string) bool { + args := m.Called(registrationUUID) + return args.Bool(0) +} + +// UpdateRegistration ... +func (m *MockScannerAPIController) UpdateRegistration(registration *scanner.Registration) error { + args := m.Called(registration) + return args.Error(0) +} + +// DeleteRegistration ... +func (m *MockScannerAPIController) DeleteRegistration(registrationUUID string) (*scanner.Registration, error) { + args := m.Called(registrationUUID) + s := args.Get(0) + if s == nil { + return nil, args.Error(1) + } + + return s.(*scanner.Registration), args.Error(1) +} + +// SetDefaultRegistration ... +func (m *MockScannerAPIController) SetDefaultRegistration(registrationUUID string) error { + args := m.Called(registrationUUID) + return args.Error(0) +} + +// SetRegistrationByProject ... +func (m *MockScannerAPIController) SetRegistrationByProject(projectID int64, scannerID string) error { + args := m.Called(projectID, scannerID) + return args.Error(0) +} + +// GetRegistrationByProject ... 
+func (m *MockScannerAPIController) GetRegistrationByProject(projectID int64) (*scanner.Registration, error) { + args := m.Called(projectID) + s := args.Get(0) + if s == nil { + return nil, args.Error(1) + } + + return s.(*scanner.Registration), args.Error(1) +} + +// Ping ... +func (m *MockScannerAPIController) Ping(registration *scanner.Registration) (*v1.ScannerAdapterMetadata, error) { + args := m.Called(registration) + sam := args.Get(0) + if sam == nil { + return nil, args.Error(1) + } + + return sam.(*v1.ScannerAdapterMetadata), nil +} + +// GetMetadata ... +func (m *MockScannerAPIController) GetMetadata(registrationUUID string) (*v1.ScannerAdapterMetadata, error) { + args := m.Called(registrationUUID) + sam := args.Get(0) + if sam == nil { + return nil, args.Error(1) + } + + return sam.(*v1.ScannerAdapterMetadata), nil +} diff --git a/src/core/api/user.go b/src/core/api/user.go index a58095983..3372bdee8 100644 --- a/src/core/api/user.go +++ b/src/core/api/user.go @@ -17,6 +17,10 @@ package api import ( "errors" "fmt" + "net/http" + "regexp" + "strconv" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" @@ -25,9 +29,6 @@ import ( "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/config" - "net/http" - "regexp" - "strconv" ) // UserAPI handles request to /api/users/{} @@ -51,7 +52,7 @@ type userSearch struct { Username string `json:"username"` } -type secretResp struct { +type secretReq struct { Secret string `json:"secret"` } @@ -221,12 +222,15 @@ func (ua *UserAPI) Search() { } query := &models.UserQuery{ Username: ua.GetString("username"), - Email: ua.GetString("email"), Pagination: &models.Pagination{ Page: page, Size: size, }, } + if len(query.Username) == 0 { + ua.SendBadRequestError(errors.New("username is required")) + return + } total, err := dao.GetTotalOfUsers(query) if err != nil { @@ 
-401,8 +405,8 @@ func (ua *UserAPI) ChangePassword() { return } - if len(req.NewPassword) == 0 { - ua.SendBadRequestError(errors.New("empty new_password")) + if err := validateSecret(req.NewPassword); err != nil { + ua.SendBadRequestError(err) return } @@ -416,20 +420,21 @@ func (ua *UserAPI) ChangePassword() { return } if changePwdOfOwn { - if user.Password != utils.Encrypt(req.OldPassword, user.Salt) { + if user.Password != utils.Encrypt(req.OldPassword, user.Salt, user.PasswordVersion) { log.Info("incorrect old_password") ua.SendForbiddenError(errors.New("incorrect old_password")) return } } - if user.Password == utils.Encrypt(req.NewPassword, user.Salt) { + if user.Password == utils.Encrypt(req.NewPassword, user.Salt, user.PasswordVersion) { ua.SendBadRequestError(errors.New("the new password can not be same with the old one")) return } updatedUser := models.User{ - UserID: ua.userID, - Password: req.NewPassword, + UserID: ua.userID, + Password: req.NewPassword, + PasswordVersion: user.PasswordVersion, } if err = dao.ChangeUserPassword(updatedUser); err != nil { ua.SendInternalServerError(fmt.Errorf("failed to change password of user %d: %v", ua.userID, err)) @@ -507,8 +512,8 @@ func (ua *UserAPI) ListUserPermissions() { return } -// GenCLISecret generates a new CLI secret and replace the old one -func (ua *UserAPI) GenCLISecret() { +// SetCLISecret handles request PUT /api/users/:id/cli_secret to update the CLI secret of the user +func (ua *UserAPI) SetCLISecret() { if ua.AuthMode != common.OIDCAuth { ua.SendPreconditionFailedError(errors.New("the auth mode has to be oidc auth")) return @@ -529,8 +534,17 @@ func (ua *UserAPI) GenCLISecret() { return } - sec := utils.GenerateRandomString() - encSec, err := utils.ReversibleEncrypt(sec, ua.secretKey) + s := &secretReq{} + if err := ua.DecodeJSONReq(s); err != nil { + ua.SendBadRequestError(err) + return + } + if err := validateSecret(s.Secret); err != nil { + ua.SendBadRequestError(err) + return + } + + encSec, 
err := utils.ReversibleEncrypt(s.Secret, ua.secretKey) if err != nil { log.Errorf("Failed to encrypt secret, error: %v", err) ua.SendInternalServerError(errors.New("failed to encrypt secret")) @@ -543,8 +557,6 @@ func (ua *UserAPI) GenCLISecret() { ua.SendInternalServerError(errors.New("failed to update secret in DB")) return } - ua.Data["json"] = secretResp{sec} - ua.ServeJSON() } func (ua *UserAPI) getOIDCUserInfo() (*models.OIDCUser, error) { @@ -583,12 +595,24 @@ func validate(user models.User) error { if utils.IsContainIllegalChar(user.Username, []string{",", "~", "#", "$", "%"}) { return fmt.Errorf("username contains illegal characters") } - if utils.IsIllegalLength(user.Password, 8, 20) { - return fmt.Errorf("password with illegal length") + + if err := validateSecret(user.Password); err != nil { + return err } + return commonValidate(user) } +func validateSecret(in string) error { + hasLower := regexp.MustCompile(`[a-z]`) + hasUpper := regexp.MustCompile(`[A-Z]`) + hasNumber := regexp.MustCompile(`[0-9]`) + if len(in) >= 8 && hasLower.MatchString(in) && hasUpper.MatchString(in) && hasNumber.MatchString(in) { + return nil + } + return errors.New("the password or secret must longer than 8 chars with at least 1 uppercase letter, 1 lowercase letter and 1 number") +} + // commonValidate validates email, realname, comment information when user register or change their profile func commonValidate(user models.User) error { diff --git a/src/core/api/user_test.go b/src/core/api/user_test.go index 88f35dd0d..530af0500 100644 --- a/src/core/api/user_test.go +++ b/src/core/api/user_test.go @@ -380,8 +380,8 @@ func buildChangeUserPasswordURL(id int) string { func TestUsersUpdatePassword(t *testing.T) { fmt.Println("Testing Update User Password") - oldPassword := "old_password" - newPassword := "new_password" + oldPassword := "old_Passw0rd" + newPassword := "new_Passw0rd" user01 := models.User{ Username: "user01_for_testing_change_password", @@ -515,7 +515,7 @@ func 
TestUsersUpdatePassword(t *testing.T) { method: http.MethodPut, url: buildChangeUserPasswordURL(user01.UserID), bodyJSON: &passwordReq{ - NewPassword: "another_new_password", + NewPassword: "another_new_Passw0rd", }, credential: admin, }, @@ -642,3 +642,13 @@ func TestUsersCurrentPermissions(t *testing.T) { assert.Nil(err) assert.Equal(int(403), httpStatusCode, "httpStatusCode should be 403") } + +func TestValidateSecret(t *testing.T) { + assert.NotNil(t, validateSecret("")) + assert.NotNil(t, validateSecret("12345678")) + assert.NotNil(t, validateSecret("passw0rd")) + assert.NotNil(t, validateSecret("PASSW0RD")) + assert.NotNil(t, validateSecret("Sh0rt")) + assert.Nil(t, validateSecret("Passw0rd")) + assert.Nil(t, validateSecret("Thisis1Valid_password")) +} diff --git a/src/core/auth/authenticator.go b/src/core/auth/authenticator.go index 48641b37b..46788ead4 100644 --- a/src/core/auth/authenticator.go +++ b/src/core/auth/authenticator.go @@ -230,12 +230,12 @@ func SearchAndOnBoardUser(username string) (int, error) { // SearchAndOnBoardGroup ... 
if altGroupName is not empty, take the altGroupName as groupName in harbor DB func SearchAndOnBoardGroup(groupKey, altGroupName string) (int, error) { userGroup, err := SearchGroup(groupKey) - if userGroup == nil { - return 0, ErrorGroupNotExist - } if err != nil { return 0, err } + if userGroup == nil { + return 0, ErrorGroupNotExist + } if userGroup != nil { err = OnBoardGroup(userGroup, altGroupName) } diff --git a/src/core/auth/db/db.go b/src/core/auth/db/db.go index dd9bdf1e5..405bf0d8a 100644 --- a/src/core/auth/db/db.go +++ b/src/core/auth/db/db.go @@ -15,6 +15,7 @@ package db import ( + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/core/auth" @@ -52,5 +53,5 @@ func (d *Auth) OnBoardUser(u *models.User) error { } func init() { - auth.Register("db_auth", &Auth{}) + auth.Register(common.DBAuth, &Auth{}) } diff --git a/src/core/auth/ldap/ldap.go b/src/core/auth/ldap/ldap.go index c5fd86d29..904aafc66 100644 --- a/src/core/auth/ldap/ldap.go +++ b/src/core/auth/ldap/ldap.go @@ -265,5 +265,5 @@ func (l *Auth) PostAuthenticate(u *models.User) error { } func init() { - auth.Register("ldap_auth", &Auth{}) + auth.Register(common.LDAPAuth, &Auth{}) } diff --git a/src/core/auth/oidc/oidc.go b/src/core/auth/oidc/oidc.go new file mode 100644 index 000000000..6f7398845 --- /dev/null +++ b/src/core/auth/oidc/oidc.go @@ -0,0 +1,35 @@ +package oidc + +import ( + "fmt" + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/dao/group" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/auth" +) + +// Auth of OIDC mode only implements the funcs for onboarding group +type Auth struct { + auth.DefaultAuthenticateHelper +} + +// SearchGroup is skipped in OIDC mode, so it makes sure any group will be onboarded. 
+func (a *Auth) SearchGroup(groupKey string) (*models.UserGroup, error) { + return &models.UserGroup{ + GroupName: groupKey, + GroupType: common.OIDCGroupType, + }, nil +} + +// OnBoardGroup create user group entity in Harbor DB, altGroupName is not used. +func (a *Auth) OnBoardGroup(u *models.UserGroup, altGroupName string) error { + // if group name provided, on board the user group + if len(u.GroupName) == 0 || u.GroupType != common.OIDCGroupType { + return fmt.Errorf("invalid input group for OIDC mode: %v", *u) + } + return group.OnBoardUserGroup(u) +} + +func init() { + auth.Register(common.OIDCAuth, &Auth{}) +} diff --git a/src/core/auth/oidc/oidc_test.go b/src/core/auth/oidc/oidc_test.go new file mode 100644 index 000000000..cb2a13fc4 --- /dev/null +++ b/src/core/auth/oidc/oidc_test.go @@ -0,0 +1,31 @@ +package oidc + +import ( + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestMain(m *testing.M) { + retCode := m.Run() + os.Exit(retCode) +} + +func TestAuth_SearchGroup(t *testing.T) { + a := Auth{} + res, err := a.SearchGroup("grp") + assert.Nil(t, err) + assert.Equal(t, models.UserGroup{GroupName: "grp", GroupType: common.OIDCGroupType}, *res) +} + +func TestAuth_OnBoardGroup(t *testing.T) { + a := Auth{} + g1 := &models.UserGroup{GroupName: "", GroupType: common.OIDCGroupType} + err1 := a.OnBoardGroup(g1, "") + assert.NotNil(t, err1) + g2 := &models.UserGroup{GroupName: "group", GroupType: common.LDAPGroupType} + err2 := a.OnBoardGroup(g2, "") + assert.NotNil(t, err2) +} diff --git a/src/core/config/config.go b/src/core/config/config.go index b3808745d..f7dea7f8c 100755 --- a/src/core/config/config.go +++ b/src/core/config/config.go @@ -512,6 +512,7 @@ func OIDCSetting() (*models.OIDCSetting, error) { VerifyCert: cfgMgr.Get(common.OIDCVerifyCert).GetBool(), ClientID: cfgMgr.Get(common.OIDCCLientID).GetString(), ClientSecret: 
cfgMgr.Get(common.OIDCClientSecret).GetString(), + GroupsClaim: cfgMgr.Get(common.OIDCGroupsClaim).GetString(), RedirectURL: extEndpoint + common.OIDCCallbackPath, Scope: scope, }, nil diff --git a/src/core/config/config_test.go b/src/core/config/config_test.go index ae31c04bc..d10db7aa7 100644 --- a/src/core/config/config_test.go +++ b/src/core/config/config_test.go @@ -253,6 +253,7 @@ func TestOIDCSetting(t *testing.T) { common.OIDCEndpoint: "https://oidc.test", common.OIDCVerifyCert: "true", common.OIDCScope: "openid, profile", + common.OIDCGroupsClaim: "my_group", common.OIDCCLientID: "client", common.OIDCClientSecret: "secret", common.ExtEndpoint: "https://harbor.test", @@ -263,6 +264,7 @@ func TestOIDCSetting(t *testing.T) { assert.Equal(t, "test", v.Name) assert.Equal(t, "https://oidc.test", v.Endpoint) assert.True(t, v.VerifyCert) + assert.Equal(t, "my_group", v.GroupsClaim) assert.Equal(t, "client", v.ClientID) assert.Equal(t, "secret", v.ClientSecret) assert.Equal(t, "https://harbor.test/c/oidc/callback", v.RedirectURL) diff --git a/src/core/controllers/base.go b/src/core/controllers/base.go index 9dd0f18a2..714306a75 100644 --- a/src/core/controllers/base.go +++ b/src/core/controllers/base.go @@ -17,7 +17,7 @@ package controllers import ( "bytes" "context" - "github.com/goharbor/harbor/src/core/filter" + "github.com/goharbor/harbor/src/core/api" "html/template" "net" "net/http" @@ -36,13 +36,12 @@ import ( "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/auth" "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/filter" ) -const userKey = "user" - // CommonController handles request from UI that doesn't expect a page, such as /SwitchLanguage /logout ... 
type CommonController struct { - beego.Controller + api.BaseController i18n.Locale } @@ -51,6 +50,9 @@ func (cc *CommonController) Render() error { return nil } +// Prepare overwrites the Prepare func in api.BaseController to ignore unnecessary steps +func (cc *CommonController) Prepare() {} + type messageDetail struct { Hint string URL string @@ -111,7 +113,7 @@ func (cc *CommonController) Login() { if user == nil { cc.CustomAbort(http.StatusUnauthorized, "") } - cc.SetSession(userKey, *user) + cc.PopulateUserSession(*user) } // LogOut Habor UI @@ -252,11 +254,10 @@ func (cc *CommonController) ResetPassword() { cc.CustomAbort(http.StatusForbidden, http.StatusText(http.StatusForbidden)) } - password := cc.GetString("password") + rawPassword := cc.GetString("password") - if password != "" { - user.Password = password - err = dao.ResetUserPassword(*user) + if rawPassword != "" { + err = dao.ResetUserPassword(*user, rawPassword) if err != nil { log.Errorf("Error occurred in ResetUserPassword: %v", err) cc.CustomAbort(http.StatusInternalServerError, "Internal error.") diff --git a/src/core/controllers/oidc.go b/src/core/controllers/oidc.go index 3bc5d1e35..c43e7ffca 100644 --- a/src/core/controllers/oidc.go +++ b/src/core/controllers/oidc.go @@ -17,6 +17,7 @@ package controllers import ( "encoding/json" "fmt" + "github.com/goharbor/harbor/src/common/dao/group" "net/http" "strings" @@ -50,12 +51,13 @@ type oidcUserData struct { Subject string `json:"sub"` Username string `json:"name"` Email string `json:"email"` + GroupIDs []int `json:"group_ids"` } // Prepare include public code path for call request handler of OIDCController func (oc *OIDCController) Prepare() { if mode, _ := config.AuthMode(); mode != common.OIDCAuth { - oc.SendPreconditionFailedError(fmt.Errorf("Auth Mode: %s is not OIDC based", mode)) + oc.SendPreconditionFailedError(fmt.Errorf("auth mode: %s is not OIDC based", mode)) return } } @@ -114,6 +116,10 @@ func (oc *OIDCController) Callback() { 
oc.SendInternalServerError(err) return } + d.GroupIDs, err = group.GetGroupIDByGroupName(oidc.GroupsFromToken(idToken), common.OIDCGroupType) + if err != nil { + log.Warningf("Failed to get group ID list, due to error: %v, setting empty list into user model.", err) + } ouDataStr, err := json.Marshal(d) if err != nil { oc.SendInternalServerError(err) @@ -137,6 +143,7 @@ func (oc *OIDCController) Callback() { oc.Controller.Redirect(fmt.Sprintf("/oidc-onboard?username=%s", strings.Replace(d.Username, " ", "_", -1)), http.StatusFound) } else { + u.GroupIDs = d.GroupIDs oidcUser, err := dao.GetOIDCUserByUserID(u.UserID) if err != nil { oc.SendInternalServerError(err) @@ -148,7 +155,7 @@ func (oc *OIDCController) Callback() { oc.SendInternalServerError(err) return } - oc.SetSession(userKey, *u) + oc.PopulateUserSession(*u) oc.Controller.Redirect("/", http.StatusFound) } } @@ -203,6 +210,7 @@ func (oc *OIDCController) Onboard() { Username: username, Realname: d.Username, Email: email, + GroupIDs: d.GroupIDs, OIDCUserMeta: &oidcUser, Comment: oidcUserComment, } @@ -219,8 +227,8 @@ func (oc *OIDCController) Onboard() { } user.OIDCUserMeta = nil - oc.SetSession(userKey, user) oc.DelSession(userInfoKey) + oc.PopulateUserSession(user) } func secretAndToken(tokenBytes []byte) (string, string, error) { diff --git a/src/core/filter/security.go b/src/core/filter/security.go index 34f7310a5..b56429252 100644 --- a/src/core/filter/security.go +++ b/src/core/filter/security.go @@ -25,6 +25,7 @@ import ( "github.com/docker/distribution/reference" "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/models" secstore "github.com/goharbor/harbor/src/common/secret" "github.com/goharbor/harbor/src/common/security" @@ -42,6 +43,7 @@ import ( "strings" "github.com/goharbor/harbor/src/pkg/authproxy" + "github.com/goharbor/harbor/src/pkg/robot" ) // ContextValueKey for 
content value @@ -193,7 +195,8 @@ func (r *robotAuthReqCtxModifier) Modify(ctx *beegoctx.Context) bool { return false } // Do authn for robot account, as Harbor only stores the token ID, just validate the ID and disable. - robot, err := dao.GetRobotByID(htk.Claims.(*token.RobotClaims).TokenID) + ctr := robot.RobotCtr + robot, err := ctr.GetRobotAccount(htk.Claims.(*token.RobotClaims).TokenID) if err != nil { log.Errorf("failed to get robot %s: %v", robotName, err) return false @@ -284,6 +287,10 @@ func (it *idTokenReqCtxModifier) Modify(ctx *beegoctx.Context) bool { log.Warning("User matches token's claims is not onboarded.") return false } + u.GroupIDs, err = group.GetGroupIDByGroupName(oidc.GroupsFromToken(claims), common.OIDCGroupType) + if err != nil { + log.Errorf("Failed to get group ID list for OIDC user: %s, error: %v", u.Username, err) + } pm := config.GlobalProjectMgr sc := local.NewSecurityContext(u, pm) setSecurCtxAndPM(ctx.Request, sc, pm) @@ -457,7 +464,7 @@ func (s *sessionReqCtxModifier) Modify(ctx *beegoctx.Context) bool { if ou != nil { // If user does not have OIDC metadata, it means he is not onboarded via OIDC authn, // so we can skip checking the token. 
if err := oidc.VerifyAndPersistToken(ctx.Request.Context(), ou); err != nil { - log.Errorf("Failed to verify secret, error: %v", err) + log.Errorf("Failed to verify token, error: %v", err) return false } } diff --git a/src/core/filter/security_test.go b/src/core/filter/security_test.go index a74d2fa12..5c23dd7ec 100644 --- a/src/core/filter/security_test.go +++ b/src/core/filter/security_test.go @@ -112,6 +112,7 @@ func TestConfigCtxModifier(t *testing.T) { common.OIDCEndpoint: "https://accounts.google.com", common.OIDCVerifyCert: "true", common.OIDCScope: "openid, profile, offline_access", + common.OIDCGroupsClaim: "groups", common.OIDCCLientID: "client", common.OIDCClientSecret: "secret", common.ExtEndpoint: "https://harbor.test", diff --git a/src/core/main.go b/src/core/main.go index 837d4be28..bf485275f 100755 --- a/src/core/main.go +++ b/src/core/main.go @@ -35,6 +35,7 @@ import ( _ "github.com/goharbor/harbor/src/core/auth/authproxy" _ "github.com/goharbor/harbor/src/core/auth/db" _ "github.com/goharbor/harbor/src/core/auth/ldap" + _ "github.com/goharbor/harbor/src/core/auth/oidc" _ "github.com/goharbor/harbor/src/core/auth/uaa" quota "github.com/goharbor/harbor/src/core/api/quota" @@ -84,17 +85,22 @@ func updateInitPassword(userID int, password string) error { // Quota migration func quotaSync() error { - usages, err := dao.ListQuotaUsages() - if err != nil { - log.Errorf("list quota usage error, %v", err) - return err - } projects, err := dao.GetProjects(nil) if err != nil { log.Errorf("list project error, %v", err) return err } + var pids []string + for _, project := range projects { + pids = append(pids, strconv.FormatInt(project.ProjectID, 10)) + } + usages, err := dao.ListQuotaUsages(&models.QuotaUsageQuery{Reference: "project", ReferenceIDs: pids}) + if err != nil { + log.Errorf("list quota usage error, %v", err) + return err + } + // The condition handles these two cases: // 1, len(project) > 1 && len(usages) == 1. 
existing projects without usage, as we do always has 'library' usage in DB. // 2, migration fails at the phase of inserting usage into DB, and parts of them are inserted successfully. diff --git a/src/core/middlewares/sizequota/builder.go b/src/core/middlewares/sizequota/builder.go index b3085f4d6..66cc158cf 100644 --- a/src/core/middlewares/sizequota/builder.go +++ b/src/core/middlewares/sizequota/builder.go @@ -36,7 +36,7 @@ var ( } ) -// blobStreamUploadBuilder interceptor for PATCH /v2//blobs/uploads/ +// blobStreamUploadBuilder interceptor builder for PATCH /v2//blobs/uploads/ type blobStreamUploadBuilder struct{} func (*blobStreamUploadBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { @@ -48,9 +48,13 @@ func (*blobStreamUploadBuilder) Build(req *http.Request) (interceptor.Intercepto uuid := s[2] onResponse := func(w http.ResponseWriter, req *http.Request) { + if !config.QuotaPerProjectEnable() { + return + } + size, err := parseUploadedBlobSize(w) if err != nil { - log.Errorf("failed to parse uploaded blob size for upload %s", uuid) + log.Errorf("failed to parse uploaded blob size for upload %s, error: %v", uuid, err) return } diff --git a/src/core/middlewares/sizequota/util.go b/src/core/middlewares/sizequota/util.go index 965b96c5b..26d1c8e0c 100644 --- a/src/core/middlewares/sizequota/util.go +++ b/src/core/middlewares/sizequota/util.go @@ -42,9 +42,16 @@ func parseUploadedBlobSize(w http.ResponseWriter) (int64, error) { // Range: Range indicating the current progress of the upload. 
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload r := w.Header().Get("Range") + if r == "" { + return 0, errors.New("range header not found") + } - end := strings.Split(r, "-")[1] - size, err := strconv.ParseInt(end, 10, 64) + parts := strings.SplitN(r, "-", 2) + if len(parts) != 2 { + return 0, fmt.Errorf("range header bad value: %s", r) + } + + size, err := strconv.ParseInt(parts[1], 10, 64) if err != nil { return 0, err } diff --git a/src/core/middlewares/sizequota/util_test.go b/src/core/middlewares/sizequota/util_test.go new file mode 100644 index 000000000..93be6fcd9 --- /dev/null +++ b/src/core/middlewares/sizequota/util_test.go @@ -0,0 +1,59 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sizequota + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func Test_parseUploadedBlobSize(t *testing.T) { + writer := func(header string) http.ResponseWriter { + rr := httptest.NewRecorder() + if header != "" { + rr.Header().Add("Range", header) + } + return rr + } + type args struct { + w http.ResponseWriter + } + tests := []struct { + name string + args args + want int64 + wantErr bool + }{ + {"success", args{writer("0-99")}, 100, false}, + {"range header not found", args{writer("")}, 0, true}, + {"range header bad value", args{writer("0")}, 0, true}, + {"range header bad value", args{writer("0-")}, 0, true}, + {"range header bad value", args{writer("0-a")}, 0, true}, + {"range header bad value", args{writer("0-1-2")}, 0, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseUploadedBlobSize(tt.args.w) + if (err != nil) != tt.wantErr { + t.Errorf("parseUploadedBlobSize() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("parseUploadedBlobSize() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/core/middlewares/util/util.go b/src/core/middlewares/util/util.go index 8a9624291..85a32ab4b 100644 --- a/src/core/middlewares/util/util.go +++ b/src/core/middlewares/util/util.go @@ -40,7 +40,7 @@ import ( "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/promgr" "github.com/goharbor/harbor/src/pkg/scan/whitelist" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" ) type contextKey string @@ -346,6 +346,10 @@ func (pc PmsPolicyChecker) ContentTrustEnabled(name string) bool { log.Errorf("Unexpected error when getting the project, error: %v", err) return true } + if project == nil { + log.Debugf("project %s not found", name) + return false + } return project.ContentTrustEnabled() } diff --git a/src/core/middlewares/util/util_test.go b/src/core/middlewares/util/util_test.go index
2e6c9d609..db2f8960a 100644 --- a/src/core/middlewares/util/util_test.go +++ b/src/core/middlewares/util/util_test.go @@ -32,7 +32,7 @@ import ( notarytest "github.com/goharbor/harbor/src/common/utils/notary/test" testutils "github.com/goharbor/harbor/src/common/utils/test" "github.com/goharbor/harbor/src/core/config" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -187,6 +187,9 @@ func TestPMSPolicyChecker(t *testing.T) { assert.True(t, projectVulnerableEnabled) assert.Equal(t, projectVulnerableSeverity, models.SevLow) assert.Empty(t, wl.Items) + + contentTrustFlag = GetPolicyChecker().ContentTrustEnabled("non_exist_project") + assert.False(t, contentTrustFlag) } func TestCopyResp(t *testing.T) { diff --git a/src/core/router.go b/src/core/router.go index 7e01b934e..6f38680b9 100755 --- a/src/core/router.go +++ b/src/core/router.go @@ -52,7 +52,7 @@ func initRouters() { beego.Router("/api/users/:id([0-9]+)/password", &api.UserAPI{}, "put:ChangePassword") beego.Router("/api/users/:id/permissions", &api.UserAPI{}, "get:ListUserPermissions") beego.Router("/api/users/:id/sysadmin", &api.UserAPI{}, "put:ToggleUserAdminRole") - beego.Router("/api/users/:id/gen_cli_secret", &api.UserAPI{}, "post:GenCLISecret") + beego.Router("/api/users/:id/cli_secret", &api.UserAPI{}, "put:SetCLISecret") beego.Router("/api/usergroups/?:ugid([0-9]+)", &api.UserGroupAPI{}) beego.Router("/api/ldap/ping", &api.LdapAPI{}, "post:Ping") beego.Router("/api/ldap/users/search", &api.LdapAPI{}, "get:Search") @@ -87,12 +87,9 @@ func initRouters() { beego.Router("/api/repositories/*/tags/:tag/labels", &api.RepositoryLabelAPI{}, "get:GetOfImage;post:AddToImage") beego.Router("/api/repositories/*/tags/:tag/labels/:id([0-9]+)", &api.RepositoryLabelAPI{}, "delete:RemoveFromImage") beego.Router("/api/repositories/*/tags", &api.RepositoryAPI{}, "get:GetTags;post:Retag") - 
beego.Router("/api/repositories/*/tags/:tag/scan", &api.RepositoryAPI{}, "post:ScanImage") - beego.Router("/api/repositories/*/tags/:tag/vulnerability/details", &api.RepositoryAPI{}, "Get:VulnerabilityDetails") beego.Router("/api/repositories/*/tags/:tag/manifest", &api.RepositoryAPI{}, "get:GetManifests") beego.Router("/api/repositories/*/signatures", &api.RepositoryAPI{}, "get:GetSignatures") beego.Router("/api/repositories/top", &api.RepositoryAPI{}, "get:GetTopRepos") - beego.Router("/api/jobs/scan/:id([0-9]+)/log", &api.ScanJobAPI{}, "get:GetLog") beego.Router("/api/system/gc", &api.GCAPI{}, "get:List") beego.Router("/api/system/gc/:id", &api.GCAPI{}, "get:GetGC") @@ -121,6 +118,9 @@ func initRouters() { beego.Router("/api/projects/:pid([0-9]+)/webhook/jobs/", &api.NotificationJobAPI{}, "get:List") + beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules", &api.ImmutableTagRuleAPI{}, "get:List;post:Post") + beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules/:id([0-9]+)", &api.ImmutableTagRuleAPI{}) + beego.Router("/api/internal/configurations", &api.ConfigAPI{}, "get:GetInternalConfig;put:Put") beego.Router("/api/configurations", &api.ConfigAPI{}, "get:Get;put:Put") beego.Router("/api/statistics", &api.StatisticAPI{}) @@ -139,7 +139,6 @@ func initRouters() { // external service that hosted on harbor process: beego.Router("/service/notifications", ®istry.NotificationHandler{}) - beego.Router("/service/notifications/jobs/scan/:id([0-9]+)", &jobs.Handler{}, "post:HandleScan") beego.Router("/service/notifications/jobs/adminjob/:id([0-9]+)", &admin.Handler{}, "post:HandleAdminJob") beego.Router("/service/notifications/jobs/replication/:id([0-9]+)", &jobs.Handler{}, "post:HandleReplicationScheduleJob") beego.Router("/service/notifications/jobs/replication/task/:id([0-9]+)", &jobs.Handler{}, "post:HandleReplicationTask") @@ -164,6 +163,8 @@ func initRouters() { beego.Router("/api/retentions/:id/executions", &api.RetentionAPI{}, "get:ListRetentionExecs") 
beego.Router("/api/retentions/:id/executions/:eid/tasks", &api.RetentionAPI{}, "get:ListRetentionExecTasks") beego.Router("/api/retentions/:id/executions/:eid/tasks/:tid", &api.RetentionAPI{}, "get:GetRetentionExecTaskLog") + beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules", &api.ImmutableTagRuleAPI{}, "get:List;post:Post") + beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules/:id([0-9]+)", &api.ImmutableTagRuleAPI{}) beego.Router("/v2/*", &controllers.RegistryProxy{}, "*:Handle") @@ -192,6 +193,25 @@ func initRouters() { beego.Router("/api/chartrepo/:repo/charts/:name/:version/labels/:id([0-9]+)", chartLabelAPIType, "delete:RemoveLabel") } + // Add routes for plugin scanner management + scannerAPI := &api.ScannerAPI{} + beego.Router("/api/scanners", scannerAPI, "post:Create;get:List") + beego.Router("/api/scanners/:uuid", scannerAPI, "get:Get;delete:Delete;put:Update;patch:SetAsDefault") + beego.Router("/api/scanners/:uuid/metadata", scannerAPI, "get:Metadata") + beego.Router("/api/scanners/ping", scannerAPI, "post:Ping") + + // Add routes for project level scanner + proScannerAPI := &api.ProjectScannerAPI{} + beego.Router("/api/projects/:pid([0-9]+)/scanner", proScannerAPI, "get:GetProjectScanner;put:SetProjectScanner") + + // Add routes for scan + scanAPI := &api.ScanAPI{} + beego.Router("/api/repositories/*/tags/:tag/scan", scanAPI, "post:Scan;get:Report") + beego.Router("/api/repositories/*/tags/:tag/scan/:uuid/log", scanAPI, "get:Log") + + // Handle scan hook + beego.Router("/service/notifications/jobs/scan/:uuid", &jobs.Handler{}, "post:HandleScan") + // Error pages beego.ErrorController(&controllers.ErrorController{}) diff --git a/src/core/service/notifications/jobs/handler.go b/src/core/service/notifications/jobs/handler.go index b383400b4..a1599ddc1 100755 --- a/src/core/service/notifications/jobs/handler.go +++ b/src/core/service/notifications/jobs/handler.go @@ -18,7 +18,8 @@ import ( "encoding/json" "time" - 
"github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/scan/api/scan" + "github.com/goharbor/harbor/src/common/job" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils/log" @@ -49,29 +50,36 @@ type Handler struct { rawStatus string checkIn string revision int64 + trackID string + change *jjob.StatusChange } // Prepare ... func (h *Handler) Prepare() { - id, err := h.GetInt64FromPath(":id") - if err != nil { - log.Errorf("Failed to get job ID, error: %v", err) - // Avoid job service from resending... - h.Abort("200") - return + h.trackID = h.GetStringFromPath(":uuid") + if len(h.trackID) == 0 { + id, err := h.GetInt64FromPath(":id") + if err != nil { + log.Errorf("Failed to get job ID, error: %v", err) + // Avoid job service from resending... + h.Abort("200") + return + } + h.id = id } - h.id = id + var data jjob.StatusChange - err = json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &data) + err := json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &data) if err != nil { - log.Errorf("Failed to decode job status change, job ID: %d, error: %v", id, err) + log.Errorf("Failed to decode job status change with error: %v", err) h.Abort("200") return } + h.change = &data h.rawStatus = data.Status status, ok := statusMap[data.Status] if !ok { - log.Debugf("drop the job status update event: job id-%d, status-%s", id, status) + log.Debugf("drop the job status update event: job id-%d/track id-%s, status-%s", h.id, h.trackID, status) h.Abort("200") return } @@ -84,7 +92,8 @@ func (h *Handler) Prepare() { // HandleScan handles the webhook of scan job func (h *Handler) HandleScan() { - log.Debugf("received san job status update event: job-%d, status-%s", h.id, h.status) + log.Debugf("received san job status update event: job-%d, status-%s, track_id-%s", h.id, h.status, h.trackID) + // Trigger image scan webhook event only for JobFinished and JobError status if h.status == models.JobFinished || h.status == models.JobError { e 
:= &event.Event{} @@ -101,7 +110,7 @@ func (h *Handler) HandleScan() { } } - if err := dao.UpdateScanJobStatus(h.id, h.status); err != nil { + if err := scan.DefaultController.HandleJobHooks(h.trackID, h.change); err != nil { log.Errorf("Failed to update job status, id: %d, status: %s", h.id, h.status) h.SendInternalServerError(err) return diff --git a/src/go.mod b/src/go.mod index fdc8554c8..11b8d578a 100644 --- a/src/go.mod +++ b/src/go.mod @@ -45,6 +45,7 @@ require ( github.com/google/certificate-transparency-go v1.0.21 // indirect github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect + github.com/google/uuid v1.1.1 github.com/gorilla/handlers v1.3.0 github.com/gorilla/mux v1.6.2 github.com/graph-gophers/dataloader v5.0.0+incompatible diff --git a/src/go.sum b/src/go.sum index 5ac4284ff..aa24c1d75 100644 --- a/src/go.sum +++ b/src/go.sum @@ -140,6 +140,8 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeq github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= diff --git a/src/jobservice/api/authenticator.go b/src/jobservice/api/authenticator.go index d75413683..cced73415 100644 --- 
a/src/jobservice/api/authenticator.go +++ b/src/jobservice/api/authenticator.go @@ -56,7 +56,7 @@ func (sa *SecretAuthenticator) DoAuth(req *http.Request) error { } if !strings.HasPrefix(h, secretPrefix) { - return fmt.Errorf("'%s' should start with '%s' but got '%s' now", authHeader, secretPrefix, h) + return fmt.Errorf("'%s' should start with '%s'", authHeader, secretPrefix) } secret := strings.TrimSpace(strings.TrimPrefix(h, secretPrefix)) diff --git a/src/jobservice/core/controller.go b/src/jobservice/core/controller.go index ea018e53a..bbc2f1220 100644 --- a/src/jobservice/core/controller.go +++ b/src/jobservice/core/controller.go @@ -16,17 +16,17 @@ package core import ( "fmt" - "github.com/goharbor/harbor/src/jobservice/mgt" - "github.com/pkg/errors" - "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/pkg/errors" + "github.com/robfig/cron" "github.com/goharbor/harbor/src/jobservice/common/query" "github.com/goharbor/harbor/src/jobservice/common/utils" "github.com/goharbor/harbor/src/jobservice/errs" "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/jobservice/mgt" "github.com/goharbor/harbor/src/jobservice/worker" - "github.com/robfig/cron" ) // basicController implement the core interface and provides related job handle methods. 
diff --git a/src/jobservice/job/impl/replication/replication.go b/src/jobservice/job/impl/replication/replication.go index e341313fe..4cdb63acf 100644 --- a/src/jobservice/job/impl/replication/replication.go +++ b/src/jobservice/job/impl/replication/replication.go @@ -42,6 +42,10 @@ import ( _ "github.com/goharbor/harbor/src/replication/adapter/azurecr" // register the AliACR adapter _ "github.com/goharbor/harbor/src/replication/adapter/aliacr" + // register the Jfrog Artifactory adapter + _ "github.com/goharbor/harbor/src/replication/adapter/jfrog" + // register the Quay.io adapter + _ "github.com/goharbor/harbor/src/replication/adapter/quayio" // register the Helm Hub adapter _ "github.com/goharbor/harbor/src/replication/adapter/helmhub" // register the GitLab adapter diff --git a/src/jobservice/job/impl/replication/replication_test.go b/src/jobservice/job/impl/replication/replication_test.go index 8abcd65dc..81c8d36bc 100644 --- a/src/jobservice/job/impl/replication/replication_test.go +++ b/src/jobservice/job/impl/replication/replication_test.go @@ -88,10 +88,10 @@ func (f *fakedTransfer) Transfer(src *model.Resource, dst *model.Resource) error } func TestRun(t *testing.T) { - err := transfer.RegisterFactory("res", fakedTransferFactory) + err := transfer.RegisterFactory("art", fakedTransferFactory) require.Nil(t, err) params := map[string]interface{}{ - "src_resource": `{"type":"res"}`, + "src_resource": `{"type":"art"}`, "dst_resource": `{}`, } rep := &Replication{} diff --git a/src/jobservice/logger/backend/file_logger.go b/src/jobservice/logger/backend/file_logger.go index 0dc454bbc..53834ade7 100644 --- a/src/jobservice/logger/backend/file_logger.go +++ b/src/jobservice/logger/backend/file_logger.go @@ -16,7 +16,7 @@ type FileLogger struct { // NewFileLogger crates a new file logger // nil might be returned func NewFileLogger(level string, logPath string, depth int) (*FileLogger, error) { - f, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0644) + f, 
err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0600) if err != nil { return nil, err } diff --git a/src/jobservice/logger/base_test.go b/src/jobservice/logger/base_test.go index 7ce373dba..45c638d58 100644 --- a/src/jobservice/logger/base_test.go +++ b/src/jobservice/logger/base_test.go @@ -12,6 +12,13 @@ import ( "github.com/goharbor/harbor/src/jobservice/logger/backend" ) +const ( + fakeLogFile = "f00000000000000000000000.log" + fakeLogID = "f00000000000000000000000" + fakeJobID = "f00000000000000000000001" + fakeJobID2 = "f00000000000000000000002" +) + // Test one single std logger func TestGetLoggerSingleStd(t *testing.T) { l, err := GetLogger(BackendOption("STD_OUTPUT", "DEBUG", nil)) @@ -48,7 +55,7 @@ func TestGetLoggerSingleFile(t *testing.T) { lSettings := map[string]interface{}{} lSettings["base_dir"] = os.TempDir() - lSettings["filename"] = fmt.Sprintf("%s.log", "fake_job_ID") + lSettings["filename"] = fmt.Sprintf("%s.log", fakeJobID) defer func() { if err := os.Remove(path.Join(os.TempDir(), lSettings["filename"].(string))); err != nil { t.Error(err) @@ -67,7 +74,7 @@ func TestGetLoggerSingleFile(t *testing.T) { func TestGetLoggersMulti(t *testing.T) { lSettings := map[string]interface{}{} lSettings["base_dir"] = os.TempDir() - lSettings["filename"] = fmt.Sprintf("%s.log", "fake_job_ID2") + lSettings["filename"] = fmt.Sprintf("%s.log", fakeJobID2) defer func() { if err := os.Remove(path.Join(os.TempDir(), lSettings["filename"].(string))); err != nil { t.Error(err) @@ -142,7 +149,7 @@ func TestGetGetter(t *testing.T) { t.Fatal(err) } - logFile := path.Join(os.TempDir(), "fake_log_file.log") + logFile := path.Join(os.TempDir(), fakeLogFile) if err := ioutil.WriteFile(logFile, []byte("hello log getter"), 0644); err != nil { t.Fatal(err) } @@ -152,7 +159,7 @@ func TestGetGetter(t *testing.T) { } }() - data, err := g.Retrieve("fake_log_file") + data, err := g.Retrieve(fakeLogID) if err != nil { t.Error(err) } diff --git 
a/src/jobservice/logger/getter/file_getter.go b/src/jobservice/logger/getter/file_getter.go index cc7faffad..3838964b4 100644 --- a/src/jobservice/logger/getter/file_getter.go +++ b/src/jobservice/logger/getter/file_getter.go @@ -1,6 +1,7 @@ package getter import ( + "encoding/hex" "errors" "fmt" "io/ioutil" @@ -23,8 +24,12 @@ func NewFileGetter(baseDir string) *FileGetter { // Retrieve implements @Interface.Retrieve func (fg *FileGetter) Retrieve(logID string) ([]byte, error) { - if len(logID) == 0 { - return nil, errors.New("empty log identify") + if len(logID) != 24 { + return nil, errors.New("invalid length of log identifier") + } + + if _, err := hex.DecodeString(logID); err != nil { + return nil, errors.New("invalid log identifier") } fPath := path.Join(fg.baseDir, fmt.Sprintf("%s.log", logID)) diff --git a/src/jobservice/logger/getter/file_getter_test.go b/src/jobservice/logger/getter/file_getter_test.go index 9053819a3..3685d9874 100644 --- a/src/jobservice/logger/getter/file_getter_test.go +++ b/src/jobservice/logger/getter/file_getter_test.go @@ -9,10 +9,16 @@ import ( "github.com/goharbor/harbor/src/jobservice/errs" ) +const ( + newLogFileName = "30dbf28152f361ba57f95f84.log" + newLogFileID = "30dbf28152f361ba57f95f84" + nonExistFileID = "f00000000000000000000000" +) + // Test the log data getter func TestLogDataGetter(t *testing.T) { - fakeLog := path.Join(os.TempDir(), "TestLogDataGetter.log") - if err := ioutil.WriteFile(fakeLog, []byte("hello"), 0644); err != nil { + fakeLog := path.Join(os.TempDir(), newLogFileName) + if err := ioutil.WriteFile(fakeLog, []byte("hello"), 0600); err != nil { t.Fatal(err) } defer func() { @@ -22,7 +28,7 @@ func TestLogDataGetter(t *testing.T) { }() fg := NewFileGetter(os.TempDir()) - if _, err := fg.Retrieve("not-existing"); err != nil { + if _, err := fg.Retrieve(nonExistFileID); err != nil { if !errs.IsObjectNotFoundError(err) { t.Error("expect object not found error but got other error") } @@ -30,7 +36,7 @@ func
TestLogDataGetter(t *testing.T) { t.Error("expect non nil error but got nil") } - data, err := fg.Retrieve("TestLogDataGetter") + data, err := fg.Retrieve(newLogFileID) if err != nil { t.Error(err) } diff --git a/src/jobservice/runtime/bootstrap.go b/src/jobservice/runtime/bootstrap.go index 9a2bdd41f..78c657605 100644 --- a/src/jobservice/runtime/bootstrap.go +++ b/src/jobservice/runtime/bootstrap.go @@ -23,9 +23,6 @@ import ( "syscall" "time" - "github.com/gomodule/redigo/redis" - "github.com/pkg/errors" - "github.com/goharbor/harbor/src/jobservice/api" "github.com/goharbor/harbor/src/jobservice/common/utils" "github.com/goharbor/harbor/src/jobservice/config" @@ -45,7 +42,10 @@ import ( "github.com/goharbor/harbor/src/jobservice/worker" "github.com/goharbor/harbor/src/jobservice/worker/cworker" "github.com/goharbor/harbor/src/pkg/retention" + sc "github.com/goharbor/harbor/src/pkg/scan" "github.com/goharbor/harbor/src/pkg/scheduler" + "github.com/gomodule/redigo/redis" + "github.com/pkg/errors" ) const ( @@ -242,7 +242,7 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool( // Only for debugging and testing purpose job.SampleJob: (*sample.Job)(nil), // Functional jobs - job.ImageScanJob: (*scan.ClairJob)(nil), + job.ImageScanJob: (*sc.Job)(nil), job.ImageScanAllJob: (*scan.All)(nil), job.ImageGC: (*gc.GarbageCollector)(nil), job.Replication: (*replication.Replication)(nil), diff --git a/src/pkg/retention/res/candidate.go b/src/pkg/art/candidate.go similarity index 99% rename from src/pkg/retention/res/candidate.go rename to src/pkg/art/candidate.go index 15f5e8088..f44e22b99 100644 --- a/src/pkg/retention/res/candidate.go +++ b/src/pkg/art/candidate.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package res +package art import ( "encoding/base64" diff --git a/src/pkg/retention/res/result.go b/src/pkg/art/result.go similarity index 98% rename from src/pkg/retention/res/result.go rename to src/pkg/art/result.go index be91be04a..43d09b29d 100644 --- a/src/pkg/retention/res/result.go +++ b/src/pkg/art/result.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package res +package art // Result keeps the action result type Result struct { diff --git a/src/pkg/retention/res/selector.go b/src/pkg/art/selector.go similarity index 98% rename from src/pkg/retention/res/selector.go rename to src/pkg/art/selector.go index de0d34836..4e7bbcdb0 100644 --- a/src/pkg/retention/res/selector.go +++ b/src/pkg/art/selector.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package res +package art // Selector is used to filter the inputting list type Selector interface { diff --git a/src/pkg/retention/res/selectors/doublestar/selector.go b/src/pkg/art/selectors/doublestar/selector.go similarity index 92% rename from src/pkg/retention/res/selectors/doublestar/selector.go rename to src/pkg/art/selectors/doublestar/selector.go index fcbb628b9..274dae730 100644 --- a/src/pkg/retention/res/selectors/doublestar/selector.go +++ b/src/pkg/art/selectors/doublestar/selector.go @@ -16,7 +16,7 @@ package doublestar import ( "github.com/bmatcuk/doublestar" - "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/art" ) const ( @@ -46,7 +46,7 @@ type selector struct { } // Select candidates by regular expressions -func (s *selector) Select(artifacts []*res.Candidate) (selected []*res.Candidate, err error) { +func (s *selector) Select(artifacts []*art.Candidate) (selected []*art.Candidate, err error) { value := "" excludes := false @@ -86,7 +86,7 @@ func (s *selector) Select(artifacts []*res.Candidate) 
(selected []*res.Candidate } // New is factory method for doublestar selector -func New(decoration string, pattern string) res.Selector { +func New(decoration string, pattern string) art.Selector { return &selector{ decoration: decoration, pattern: pattern, diff --git a/src/pkg/retention/res/selectors/doublestar/selector_test.go b/src/pkg/art/selectors/doublestar/selector_test.go similarity index 96% rename from src/pkg/retention/res/selectors/doublestar/selector_test.go rename to src/pkg/art/selectors/doublestar/selector_test.go index 23c8dd377..f511f8ebe 100644 --- a/src/pkg/retention/res/selectors/doublestar/selector_test.go +++ b/src/pkg/art/selectors/doublestar/selector_test.go @@ -16,7 +16,7 @@ package doublestar import ( "fmt" - "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/art" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -28,7 +28,7 @@ import ( type RegExpSelectorTestSuite struct { suite.Suite - artifacts []*res.Candidate + artifacts []*art.Candidate } // TestRegExpSelector is entrance for RegExpSelectorTestSuite @@ -38,13 +38,13 @@ func TestRegExpSelector(t *testing.T) { // SetupSuite to do preparation work func (suite *RegExpSelectorTestSuite) SetupSuite() { - suite.artifacts = []*res.Candidate{ + suite.artifacts = []*art.Candidate{ { NamespaceID: 1, Namespace: "library", Repository: "harbor", Tag: "latest", - Kind: res.Image, + Kind: art.Image, PushedTime: time.Now().Unix() - 3600, PulledTime: time.Now().Unix(), CreationTime: time.Now().Unix() - 7200, @@ -55,7 +55,7 @@ func (suite *RegExpSelectorTestSuite) SetupSuite() { Namespace: "retention", Repository: "redis", Tag: "4.0", - Kind: res.Image, + Kind: art.Image, PushedTime: time.Now().Unix() - 3600, PulledTime: time.Now().Unix(), CreationTime: time.Now().Unix() - 7200, @@ -66,7 +66,7 @@ func (suite *RegExpSelectorTestSuite) SetupSuite() { Namespace: "retention", Repository: "redis", Tag: 
"4.1", - Kind: res.Image, + Kind: art.Image, PushedTime: time.Now().Unix() - 3600, PulledTime: time.Now().Unix(), CreationTime: time.Now().Unix() - 7200, @@ -235,7 +235,7 @@ func (suite *RegExpSelectorTestSuite) TestNSExcludes() { } // Check whether the returned result matched the expected ones (only check repo:tag) -func expect(expected []string, candidates []*res.Candidate) bool { +func expect(expected []string, candidates []*art.Candidate) bool { hash := make(map[string]bool) for _, art := range candidates { diff --git a/src/pkg/retention/res/selectors/index/index.go b/src/pkg/art/selectors/index/index.go similarity index 89% rename from src/pkg/retention/res/selectors/index/index.go rename to src/pkg/art/selectors/index/index.go index 690beef2d..8387de7cd 100644 --- a/src/pkg/retention/res/selectors/index/index.go +++ b/src/pkg/art/selectors/index/index.go @@ -17,8 +17,8 @@ package index import ( "sync" - "github.com/goharbor/harbor/src/pkg/retention/res" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" + "github.com/goharbor/harbor/src/pkg/art" + "github.com/goharbor/harbor/src/pkg/art/selectors/doublestar" "github.com/pkg/errors" ) @@ -49,11 +49,11 @@ type IndexedMeta struct { // indexedItem defined item kept in the index type indexedItem struct { Meta *IndexedMeta - Factory res.SelectorFactory + Factory art.SelectorFactory } // Register the selector with the corresponding selector kind and decoration -func Register(kind string, decorations []string, factory res.SelectorFactory) { +func Register(kind string, decorations []string, factory art.SelectorFactory) { if len(kind) == 0 || factory == nil { // do nothing return @@ -69,7 +69,7 @@ func Register(kind string, decorations []string, factory res.SelectorFactory) { } // Get selector with the provided kind and decoration -func Get(kind, decoration, pattern string) (res.Selector, error) { +func Get(kind, decoration, pattern string) (art.Selector, error) { if len(kind) == 0 || 
len(decoration) == 0 { return nil, errors.New("empty selector kind or decoration") } diff --git a/src/pkg/retention/res/selectors/label/selector.go b/src/pkg/art/selectors/label/selector.go similarity index 89% rename from src/pkg/retention/res/selectors/label/selector.go rename to src/pkg/art/selectors/label/selector.go index 2fa788a5a..c43616fd6 100644 --- a/src/pkg/retention/res/selectors/label/selector.go +++ b/src/pkg/art/selectors/label/selector.go @@ -17,7 +17,7 @@ package label import ( "strings" - "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/art" ) const ( @@ -39,7 +39,7 @@ type selector struct { } // Select candidates by the labels -func (s *selector) Select(artifacts []*res.Candidate) (selected []*res.Candidate, err error) { +func (s *selector) Select(artifacts []*art.Candidate) (selected []*art.Candidate, err error) { for _, art := range artifacts { if isMatched(s.labels, art.Labels, s.decoration) { selected = append(selected, art) @@ -50,7 +50,7 @@ func (s *selector) Select(artifacts []*res.Candidate) (selected []*res.Candidate } // New is factory method for list selector -func New(decoration string, pattern string) res.Selector { +func New(decoration string, pattern string) art.Selector { labels := make([]string, 0) if len(pattern) > 0 { labels = append(labels, strings.Split(pattern, ",")...) 
diff --git a/src/pkg/retention/res/selectors/label/selector_test.go b/src/pkg/art/selectors/label/selector_test.go similarity index 94% rename from src/pkg/retention/res/selectors/label/selector_test.go rename to src/pkg/art/selectors/label/selector_test.go index 6bf58118a..6e028b62d 100644 --- a/src/pkg/retention/res/selectors/label/selector_test.go +++ b/src/pkg/art/selectors/label/selector_test.go @@ -16,7 +16,7 @@ package label import ( "fmt" - "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/art" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -28,7 +28,7 @@ import ( type LabelSelectorTestSuite struct { suite.Suite - artifacts []*res.Candidate + artifacts []*art.Candidate } // TestLabelSelector is entrance for LabelSelectorTestSuite @@ -38,13 +38,13 @@ func TestLabelSelector(t *testing.T) { // SetupSuite to do preparation work func (suite *LabelSelectorTestSuite) SetupSuite() { - suite.artifacts = []*res.Candidate{ + suite.artifacts = []*art.Candidate{ { NamespaceID: 1, Namespace: "library", Repository: "harbor", Tag: "1.9", - Kind: res.Image, + Kind: art.Image, PushedTime: time.Now().Unix() - 3600, PulledTime: time.Now().Unix(), CreationTime: time.Now().Unix() - 7200, @@ -55,7 +55,7 @@ func (suite *LabelSelectorTestSuite) SetupSuite() { Namespace: "library", Repository: "harbor", Tag: "dev", - Kind: res.Image, + Kind: art.Image, PushedTime: time.Now().Unix() - 3600, PulledTime: time.Now().Unix(), CreationTime: time.Now().Unix() - 7200, @@ -131,7 +131,7 @@ func (suite *LabelSelectorTestSuite) TestWithoutNoneExistingLabels() { } // Check whether the returned result matched the expected ones (only check repo:tag) -func expect(expected []string, candidates []*res.Candidate) bool { +func expect(expected []string, candidates []*art.Candidate) bool { hash := make(map[string]bool) for _, art := range candidates { diff --git a/src/pkg/immutabletag/controller.go 
b/src/pkg/immutabletag/controller.go new file mode 100644 index 000000000..482286d92 --- /dev/null +++ b/src/pkg/immutabletag/controller.go @@ -0,0 +1,75 @@ +package immutabletag + +import ( + "github.com/goharbor/harbor/src/pkg/immutabletag/model" +) + +var ( + // ImmuCtr is a global variable for the default immutable controller implementation + ImmuCtr = NewAPIController(NewDefaultRuleManager()) +) + +// APIController to handle the requests related with immutabletag +type APIController interface { + // GetImmutableRule ... + GetImmutableRule(id int64) (*model.Metadata, error) + + // CreateImmutableRule ... + CreateImmutableRule(m *model.Metadata) (int64, error) + + // DeleteImmutableRule ... + DeleteImmutableRule(id int64) error + + // UpdateImmutableRule ... + UpdateImmutableRule(pid int64, m *model.Metadata) error + + // ListImmutableRules ... + ListImmutableRules(pid int64) ([]model.Metadata, error) +} + +// DefaultAPIController ... +type DefaultAPIController struct { + manager Manager +} + +// GetImmutableRule ... +func (r *DefaultAPIController) GetImmutableRule(id int64) (*model.Metadata, error) { + return r.manager.GetImmutableRule(id) +} + +// DeleteImmutableRule ... +func (r *DefaultAPIController) DeleteImmutableRule(id int64) error { + _, err := r.manager.DeleteImmutableRule(id) + return err +} + +// CreateImmutableRule ... +func (r *DefaultAPIController) CreateImmutableRule(m *model.Metadata) (int64, error) { + return r.manager.CreateImmutableRule(m) +} + +// UpdateImmutableRule ... +func (r *DefaultAPIController) UpdateImmutableRule(pid int64, m *model.Metadata) error { + m0, err := r.manager.GetImmutableRule(m.ID) + if err != nil { + return err + } + if m0.Disabled != m.Disabled { + _, err := r.manager.EnableImmutableRule(m.ID, m.Disabled) + return err + } + _, err = r.manager.UpdateImmutableRule(pid, m) + return err +} + +// ListImmutableRules ... 
+func (r *DefaultAPIController) ListImmutableRules(pid int64) ([]model.Metadata, error) { + return r.manager.QueryImmutableRuleByProjectID(pid) +} + +// NewAPIController ... +func NewAPIController(immutableMgr Manager) APIController { + return &DefaultAPIController{ + manager: immutableMgr, + } +} diff --git a/src/pkg/immutabletag/controller_test.go b/src/pkg/immutabletag/controller_test.go new file mode 100644 index 000000000..96867ef9f --- /dev/null +++ b/src/pkg/immutabletag/controller_test.go @@ -0,0 +1 @@ +package immutabletag diff --git a/src/pkg/immutabletag/dao/immutable.go b/src/pkg/immutabletag/dao/immutable.go new file mode 100644 index 000000000..6f07c2b37 --- /dev/null +++ b/src/pkg/immutabletag/dao/immutable.go @@ -0,0 +1,93 @@ +package dao + +import ( + "fmt" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/immutabletag/dao/model" +) + +// ImmutableRuleDao defines the interface to access the ImmutableRule data model +type ImmutableRuleDao interface { + CreateImmutableRule(ir *model.ImmutableRule) (int64, error) + UpdateImmutableRule(projectID int64, ir *model.ImmutableRule) (int64, error) + ToggleImmutableRule(id int64, enabled bool) (int64, error) + GetImmutableRule(id int64) (*model.ImmutableRule, error) + QueryImmutableRuleByProjectID(projectID int64) ([]model.ImmutableRule, error) + QueryEnabledImmutableRuleByProjectID(projectID int64) ([]model.ImmutableRule, error) + DeleteImmutableRule(id int64) (int64, error) +} + +// New creates a default implementation for ImmutableRuleDao +func New() ImmutableRuleDao { + return &immutableRuleDao{} +} + +type immutableRuleDao struct{} + +// CreateImmutableRule creates the Immutable Rule +func (i *immutableRuleDao) CreateImmutableRule(ir *model.ImmutableRule) (int64, error) { + ir.Enabled = true + o := dao.GetOrmer() + return o.Insert(ir) +} + +// UpdateImmutableRule update the immutable rules +func (i *immutableRuleDao) 
UpdateImmutableRule(projectID int64, ir *model.ImmutableRule) (int64, error) { + ir.ProjectID = projectID + o := dao.GetOrmer() + return o.Update(ir, "TagFilter") +} + +// ToggleImmutableRule enable/disable immutable rules +func (i *immutableRuleDao) ToggleImmutableRule(id int64, enabled bool) (int64, error) { + o := dao.GetOrmer() + ir := &model.ImmutableRule{ID: id, Enabled: enabled} + return o.Update(ir, "Enabled") +} + +// GetImmutableRule get immutable rule +func (i *immutableRuleDao) GetImmutableRule(id int64) (*model.ImmutableRule, error) { + o := dao.GetOrmer() + ir := &model.ImmutableRule{ID: id} + err := o.Read(ir) + if err == orm.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + return ir, nil +} + +// QueryImmutableRuleByProjectID get all immutable rule by project +func (i *immutableRuleDao) QueryImmutableRuleByProjectID(projectID int64) ([]model.ImmutableRule, error) { + o := dao.GetOrmer() + qs := o.QueryTable(&model.ImmutableRule{}).Filter("ProjectID", projectID) + var r []model.ImmutableRule + _, err := qs.All(&r) + if err != nil { + return nil, fmt.Errorf("failed to get immutable tag rule by projectID %d, error: %v", projectID, err) + } + return r, nil +} + +// QueryEnabledImmutableRuleByProjectID get all enabled immutable rule by project +func (i *immutableRuleDao) QueryEnabledImmutableRuleByProjectID(projectID int64) ([]model.ImmutableRule, error) { + o := dao.GetOrmer() + qs := o.QueryTable(&model.ImmutableRule{}).Filter("ProjectID", projectID).Filter("Enabled", true) + var r []model.ImmutableRule + _, err := qs.All(&r) + if err != nil { + return nil, fmt.Errorf("failed to get enabled immutable tag rule by projectID %d, error: %v", projectID, err) + } + return r, nil +} + +// DeleteImmutableRule delete the immutable rule +func (i *immutableRuleDao) DeleteImmutableRule(id int64) (int64, error) { + o := dao.GetOrmer() + ir := &model.ImmutableRule{ID: id} + return o.Delete(ir) +} diff --git
a/src/pkg/immutabletag/dao/immutable_test.go b/src/pkg/immutabletag/dao/immutable_test.go new file mode 100644 index 000000000..1fe61faf6 --- /dev/null +++ b/src/pkg/immutabletag/dao/immutable_test.go @@ -0,0 +1,113 @@ +package dao + +import ( + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/immutabletag/dao/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" +) + +type immutableRuleDaoTestSuite struct { + suite.Suite + require *require.Assertions + assert *assert.Assertions + dao ImmutableRuleDao + id int64 +} + +func (t *immutableRuleDaoTestSuite) SetupSuite() { + t.require = require.New(t.T()) + t.assert = assert.New(t.T()) + dao.PrepareTestForPostgresSQL() + t.dao = New() +} + +func (t *immutableRuleDaoTestSuite) TestCreateImmutableRule() { + ir := &model.ImmutableRule{TagFilter: "**", ProjectID: 1} + id, err := t.dao.CreateImmutableRule(ir) + t.require.Nil(err) + t.require.True(id > 0, "Can not create immutable tag rule") + _, err = t.dao.DeleteImmutableRule(id) + t.require.Nil(err) +} + +func (t *immutableRuleDaoTestSuite) TestUpdateImmutableRule() { + ir := &model.ImmutableRule{TagFilter: "**", ProjectID: 1} + id, err := t.dao.CreateImmutableRule(ir) + t.require.Nil(err) + t.require.True(id > 0, "Can not create immutable tag rule") + + updatedIR := &model.ImmutableRule{ID: id, TagFilter: "1.2.0", ProjectID: 1} + updatedCnt, err := t.dao.UpdateImmutableRule(1, updatedIR) + t.require.Nil(err) + t.require.True(updatedCnt > 0, "Failed to update immutable id") + + newIr, err := t.dao.GetImmutableRule(id) + t.require.Nil(err) + t.require.True(newIr.TagFilter == "1.2.0", "Failed to update immutable tag") + + defer t.dao.DeleteImmutableRule(id) + +} + +func (t *immutableRuleDaoTestSuite) TestEnableImmutableRule() { + ir := &model.ImmutableRule{TagFilter: "**", ProjectID: 1} + id, err := t.dao.CreateImmutableRule(ir) + t.require.Nil(err) + 
t.require.True(id > 0, "Can not create immutable tag rule") + + t.dao.ToggleImmutableRule(id, false) + newIr, err := t.dao.GetImmutableRule(id) + + t.require.Nil(err) + t.require.False(newIr.Enabled, "Failed to disable the immutable rule") + + defer t.dao.DeleteImmutableRule(id) +} + +func (t *immutableRuleDaoTestSuite) TestGetImmutableRuleByProject() { + irs := []*model.ImmutableRule{ + {TagFilter: "version1", ProjectID: 99}, + {TagFilter: "version2", ProjectID: 99}, + {TagFilter: "version3", ProjectID: 99}, + {TagFilter: "version4", ProjectID: 99}, + } + for _, ir := range irs { + t.dao.CreateImmutableRule(ir) + } + + qrs, err := t.dao.QueryImmutableRuleByProjectID(99) + t.require.Nil(err) + t.require.True(len(qrs) == 4, "Failed to query 4 rows!") + + defer dao.ExecuteBatchSQL([]string{"delete from immutable_tag_rule where project_id = 99 "}) + +} +func (t *immutableRuleDaoTestSuite) TestGetEnabledImmutableRuleByProject() { + irs := []*model.ImmutableRule{ + {TagFilter: "version1", ProjectID: 99}, + {TagFilter: "version2", ProjectID: 99}, + {TagFilter: "version3", ProjectID: 99}, + {TagFilter: "version4", ProjectID: 99}, + } + for i, ir := range irs { + id, _ := t.dao.CreateImmutableRule(ir) + if i == 1 { + t.dao.ToggleImmutableRule(id, false) + } + + } + + qrs, err := t.dao.QueryEnabledImmutableRuleByProjectID(99) + t.require.Nil(err) + t.require.True(len(qrs) == 3, "Failed to query 3 rows!, got %v", len(qrs)) + + defer dao.ExecuteBatchSQL([]string{"delete from immutable_tag_rule where project_id = 99 "}) + +} + +func TestImmutableRuleDaoTestSuite(t *testing.T) { + suite.Run(t, &immutableRuleDaoTestSuite{}) +} diff --git a/src/pkg/immutabletag/dao/model/rule.go b/src/pkg/immutabletag/dao/model/rule.go new file mode 100644 index 000000000..878cc3838 --- /dev/null +++ b/src/pkg/immutabletag/dao/model/rule.go @@ -0,0 +1,22 @@ +package model + +import ( + "github.com/astaxie/beego/orm" +) + +func init() { + orm.RegisterModel(&ImmutableRule{}) +} + +// ImmutableRule 
- rule which filter image tags should be immutable. +type ImmutableRule struct { + ID int64 `orm:"pk;auto;column(id)" json:"id,omitempty"` + ProjectID int64 `orm:"column(project_id)" json:"project_id,omitempty"` + TagFilter string `orm:"column(tag_filter)" json:"tag_filter,omitempty"` + Enabled bool `orm:"column(enabled)" json:"enabled,omitempty"` +} + +// TableName ... +func (c *ImmutableRule) TableName() string { + return "immutable_tag_rule" +} diff --git a/src/pkg/immutabletag/manager.go b/src/pkg/immutabletag/manager.go new file mode 100644 index 000000000..4ef415ae2 --- /dev/null +++ b/src/pkg/immutabletag/manager.go @@ -0,0 +1,110 @@ +package immutabletag + +import ( + "encoding/json" + "github.com/goharbor/harbor/src/pkg/immutabletag/dao" + dao_model "github.com/goharbor/harbor/src/pkg/immutabletag/dao/model" + "github.com/goharbor/harbor/src/pkg/immutabletag/model" +) + +var ( + // Mgr is a global variable for the default immutablerule manager implementation + Mgr = NewDefaultRuleManager() +) + +// Manager ... 
+type Manager interface { + // CreateImmutableRule creates the Immutable Rule + CreateImmutableRule(m *model.Metadata) (int64, error) + // UpdateImmutableRule update the immutable rules + UpdateImmutableRule(projectID int64, ir *model.Metadata) (int64, error) + // EnableImmutableRule enable/disable immutable rules + EnableImmutableRule(id int64, enabled bool) (int64, error) + // GetImmutableRule get immutable rule + GetImmutableRule(id int64) (*model.Metadata, error) + // QueryImmutableRuleByProjectID get all immutable rule by project + QueryImmutableRuleByProjectID(projectID int64) ([]model.Metadata, error) + // QueryEnabledImmutableRuleByProjectID get all enabled immutable rule by project + QueryEnabledImmutableRuleByProjectID(projectID int64) ([]model.Metadata, error) + // DeleteImmutableRule delete the immutable rule + DeleteImmutableRule(id int64) (int64, error) +} + +type defaultRuleManager struct { + dao dao.ImmutableRuleDao +} + +func (drm *defaultRuleManager) CreateImmutableRule(ir *model.Metadata) (int64, error) { + daoRule := &dao_model.ImmutableRule{} + daoRule.Enabled = !ir.Disabled + daoRule.ProjectID = ir.ProjectID + data, _ := json.Marshal(ir) + daoRule.TagFilter = string(data) + return drm.dao.CreateImmutableRule(daoRule) +} + +func (drm *defaultRuleManager) UpdateImmutableRule(projectID int64, ir *model.Metadata) (int64, error) { + daoRule := &dao_model.ImmutableRule{} + data, _ := json.Marshal(ir) + daoRule.TagFilter = string(data) + return drm.dao.UpdateImmutableRule(projectID, daoRule) +} + +func (drm *defaultRuleManager) EnableImmutableRule(id int64, enabled bool) (int64, error) { + return drm.dao.ToggleImmutableRule(id, enabled) +} + +func (drm *defaultRuleManager) GetImmutableRule(id int64) (*model.Metadata, error) { + daoRule, err := drm.dao.GetImmutableRule(id) + if err != nil { + return nil, err + } + rule := &model.Metadata{} + if err = json.Unmarshal([]byte(daoRule.TagFilter), rule); err != nil { + return nil, err + } + return rule, nil 
+} + +func (drm *defaultRuleManager) QueryImmutableRuleByProjectID(projectID int64) ([]model.Metadata, error) { + daoRules, err := drm.dao.QueryImmutableRuleByProjectID(projectID) + if err != nil { + return nil, err + } + var rules []model.Metadata + for _, daoRule := range daoRules { + rule := model.Metadata{} + if err = json.Unmarshal([]byte(daoRule.TagFilter), &rule); err != nil { + return nil, err + } + rules = append(rules, rule) + } + return rules, nil +} + +func (drm *defaultRuleManager) QueryEnabledImmutableRuleByProjectID(projectID int64) ([]model.Metadata, error) { + daoRules, err := drm.dao.QueryEnabledImmutableRuleByProjectID(projectID) + if err != nil { + return nil, err + } + var rules []model.Metadata + for _, daoRule := range daoRules { + rule := model.Metadata{} + if err = json.Unmarshal([]byte(daoRule.TagFilter), &rule); err != nil { + return nil, err + } + rules = append(rules, rule) + } + return rules, nil +} + +func (drm *defaultRuleManager) DeleteImmutableRule(id int64) (int64, error) { + return drm.dao.DeleteImmutableRule(id) +} + +// NewDefaultRuleManager return a new instance of defaultRuleManager +func NewDefaultRuleManager() Manager { + return &defaultRuleManager{ + dao: dao.New(), + } +} diff --git a/src/pkg/immutabletag/manager_test.go b/src/pkg/immutabletag/manager_test.go new file mode 100644 index 000000000..319685b26 --- /dev/null +++ b/src/pkg/immutabletag/manager_test.go @@ -0,0 +1,201 @@ +package immutabletag + +import ( + dao_model "github.com/goharbor/harbor/src/pkg/immutabletag/dao/model" + "github.com/goharbor/harbor/src/pkg/immutabletag/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "os" + "testing" +) + +type mockImmutableDao struct { + mock.Mock +} + +func (m *mockImmutableDao) CreateImmutableRule(ir *dao_model.ImmutableRule) (int64, error) { + args := m.Called(ir) + return int64(args.Int(0)), args.Error(1) 
+} + +func (m *mockImmutableDao) UpdateImmutableRule(projectID int64, ir *dao_model.ImmutableRule) (int64, error) { + args := m.Called(ir) + return int64(0), args.Error(1) +} + +func (m *mockImmutableDao) QueryImmutableRuleByProjectID(projectID int64) ([]dao_model.ImmutableRule, error) { + args := m.Called() + var irs []dao_model.ImmutableRule + if args.Get(0) != nil { + irs = args.Get(0).([]dao_model.ImmutableRule) + } + return irs, args.Error(1) +} + +func (m *mockImmutableDao) QueryEnabledImmutableRuleByProjectID(projectID int64) ([]dao_model.ImmutableRule, error) { + args := m.Called() + var irs []dao_model.ImmutableRule + if args.Get(0) != nil { + irs = args.Get(0).([]dao_model.ImmutableRule) + } + return irs, args.Error(1) +} + +func (m *mockImmutableDao) DeleteImmutableRule(id int64) (int64, error) { + args := m.Called(id) + return int64(args.Int(0)), args.Error(1) +} + +func (m *mockImmutableDao) ToggleImmutableRule(id int64, enabled bool) (int64, error) { + args := m.Called(id) + return int64(args.Int(0)), args.Error(1) +} + +func (m *mockImmutableDao) GetImmutableRule(id int64) (*dao_model.ImmutableRule, error) { + args := m.Called(id) + var ir *dao_model.ImmutableRule + if args.Get(0) != nil { + ir = args.Get(0).(*dao_model.ImmutableRule) + } + return ir, args.Error(1) + +} + +type managerTestingSuite struct { + suite.Suite + t *testing.T + assert *assert.Assertions + require *require.Assertions + mockImmutableDao *mockImmutableDao +} + +func (m *managerTestingSuite) SetupSuite() { + m.t = m.T() + m.assert = assert.New(m.t) + m.require = require.New(m.t) + + err := os.Setenv("RUN_MODE", "TEST") + m.require.Nil(err) +} + +func (m *managerTestingSuite) TearDownSuite() { + err := os.Unsetenv("RUN_MODE") + m.require.Nil(err) +} + +func (m *managerTestingSuite) SetupTest() { + m.mockImmutableDao = &mockImmutableDao{} + Mgr = &defaultRuleManager{ + dao: m.mockImmutableDao, + } +} + +func TestManagerTestingSuite(t *testing.T) { + suite.Run(t, 
&managerTestingSuite{}) +} + +func (m *managerTestingSuite) TestCreateImmutableRule() { + m.mockImmutableDao.On("CreateImmutableRule", mock.Anything).Return(1, nil) + id, err := Mgr.CreateImmutableRule(&model.Metadata{}) + m.mockImmutableDao.AssertCalled(m.t, "CreateImmutableRule", mock.Anything) + m.require.Nil(err) + m.assert.Equal(int64(1), id) +} + +func (m *managerTestingSuite) TestQueryImmutableRuleByProjectID() { + m.mockImmutableDao.On("QueryImmutableRuleByProjectID", mock.Anything).Return([]dao_model.ImmutableRule{ + { + ID: 1, + ProjectID: 1, + Enabled: true, + TagFilter: "{\"id\":1, \"projectID\":1,\"priority\":0,\"disabled\":false,\"action\":\"immutable\"," + + "\"template\":\"immutable_template\"," + + "\"tag_selectors\":[{\"kind\":\"doublestar\",\"decoration\":\"matches\",\"pattern\":\"**\"}]," + + "\"scope_selectors\":{\"repository\":[{\"kind\":\"doublestar\",\"decoration\":\"repoMatches\",\"pattern\":\"**\"}]}}", + }, + { + ID: 2, + ProjectID: 1, + Enabled: false, + TagFilter: "{\"id\":2, \"projectID\":1,\"priority\":0,\"disabled\":false,\"action\":\"immutable\"," + + "\"template\":\"immutable_template\"," + + "\"tag_selectors\":[{\"kind\":\"doublestar\",\"decoration\":\"matches\",\"pattern\":\"**\"}]," + + "\"scope_selectors\":{\"repository\":[{\"kind\":\"doublestar\",\"decoration\":\"repoMatches\",\"pattern\":\"**\"}]}}", + }}, nil) + irs, err := Mgr.QueryImmutableRuleByProjectID(int64(1)) + m.mockImmutableDao.AssertCalled(m.t, "QueryImmutableRuleByProjectID", mock.Anything) + m.require.Nil(err) + m.assert.Equal(len(irs), 2) + m.assert.Equal(irs[1].Disabled, false) +} + +func (m *managerTestingSuite) TestQueryEnabledImmutableRuleByProjectID() { + m.mockImmutableDao.On("QueryEnabledImmutableRuleByProjectID", mock.Anything).Return([]dao_model.ImmutableRule{ + { + ID: 1, + ProjectID: 1, + Enabled: true, + TagFilter: "{\"id\":1, \"projectID\":1,\"priority\":0,\"disabled\":false,\"action\":\"immutable\"," + + "\"template\":\"immutable_template\"," + + 
"\"tag_selectors\":[{\"kind\":\"doublestar\",\"decoration\":\"matches\",\"pattern\":\"**\"}]," + + "\"scope_selectors\":{\"repository\":[{\"kind\":\"doublestar\",\"decoration\":\"repoMatches\",\"pattern\":\"**\"}]}}", + }, + { + ID: 2, + ProjectID: 1, + Enabled: true, + TagFilter: "{\"id\":2, \"projectID\":1,\"priority\":0,\"disabled\":false,\"action\":\"immutable\"," + + "\"template\":\"immutable_template\"," + + "\"tag_selectors\":[{\"kind\":\"doublestar\",\"decoration\":\"matches\",\"pattern\":\"**\"}]," + + "\"scope_selectors\":{\"repository\":[{\"kind\":\"doublestar\",\"decoration\":\"repoMatches\",\"pattern\":\"**\"}]}}", + }}, nil) + irs, err := Mgr.QueryEnabledImmutableRuleByProjectID(int64(1)) + m.mockImmutableDao.AssertCalled(m.t, "QueryEnabledImmutableRuleByProjectID", mock.Anything) + m.require.Nil(err) + m.assert.Equal(len(irs), 2) + m.assert.Equal(irs[0].Disabled, false) +} + +func (m *managerTestingSuite) TestGetImmutableRule() { + m.mockImmutableDao.On("GetImmutableRule", mock.Anything).Return(&dao_model.ImmutableRule{ + ID: 1, + ProjectID: 1, + Enabled: true, + TagFilter: "{\"id\":1, \"projectID\":1,\"priority\":0,\"disabled\":false,\"action\":\"immutable\"," + + "\"template\":\"immutable_template\"," + + "\"tag_selectors\":[{\"kind\":\"doublestar\",\"decoration\":\"matches\",\"pattern\":\"**\"}]," + + "\"scope_selectors\":{\"repository\":[{\"kind\":\"doublestar\",\"decoration\":\"repoMatches\",\"pattern\":\"**\"}]}}", + }, nil) + ir, err := Mgr.GetImmutableRule(1) + m.mockImmutableDao.AssertCalled(m.t, "GetImmutableRule", mock.Anything) + m.require.Nil(err) + m.require.NotNil(ir) + m.assert.Equal(int64(1), ir.ID) +} + +func (m *managerTestingSuite) TestUpdateImmutableRule() { + m.mockImmutableDao.On("UpdateImmutableRule", mock.Anything).Return(1, nil) + id, err := Mgr.UpdateImmutableRule(int64(1), &model.Metadata{}) + m.mockImmutableDao.AssertCalled(m.t, "UpdateImmutableRule", mock.Anything) + m.require.Nil(err) + m.assert.Equal(int64(0), id) +} + 
+func (m *managerTestingSuite) TestEnableImmutableRule() { + m.mockImmutableDao.On("ToggleImmutableRule", mock.Anything).Return(1, nil) + id, err := Mgr.EnableImmutableRule(int64(1), true) + m.mockImmutableDao.AssertCalled(m.t, "ToggleImmutableRule", mock.Anything) + m.require.Nil(err) + m.assert.Equal(int64(1), id) +} + +func (m *managerTestingSuite) TestDeleteImmutableRule() { + m.mockImmutableDao.On("DeleteImmutableRule", mock.Anything).Return(1, nil) + id, err := Mgr.DeleteImmutableRule(int64(1)) + m.mockImmutableDao.AssertCalled(m.t, "DeleteImmutableRule", mock.Anything) + m.require.Nil(err) + m.assert.Equal(int64(1), id) +} diff --git a/src/pkg/immutabletag/match/matcher.go b/src/pkg/immutabletag/match/matcher.go new file mode 100644 index 000000000..73c60cfb5 --- /dev/null +++ b/src/pkg/immutabletag/match/matcher.go @@ -0,0 +1,11 @@ +package match + +import ( + "github.com/goharbor/harbor/src/pkg/art" +) + +// ImmutableTagMatcher ... +type ImmutableTagMatcher interface { + // Match whether the candidate is in the immutable list + Match(c art.Candidate) (bool, error) +} diff --git a/src/pkg/immutabletag/match/rule/match.go b/src/pkg/immutabletag/match/rule/match.go new file mode 100644 index 000000000..d06605a12 --- /dev/null +++ b/src/pkg/immutabletag/match/rule/match.go @@ -0,0 +1,88 @@ +package rule + +import ( + "github.com/goharbor/harbor/src/pkg/art" + "github.com/goharbor/harbor/src/pkg/art/selectors/index" + "github.com/goharbor/harbor/src/pkg/immutabletag" + "github.com/goharbor/harbor/src/pkg/immutabletag/match" + "github.com/goharbor/harbor/src/pkg/immutabletag/model" +) + +// Matcher ... +type Matcher struct { + pid int64 + rules []model.Metadata +} + +// Match ... 
+func (rm *Matcher) Match(c art.Candidate) (bool, error) { + if err := rm.getImmutableRules(); err != nil { + return false, err + } + + cands := []*art.Candidate{&c} + for _, r := range rm.rules { + if r.Disabled { + continue + } + + // match repositories according to the repository selectors + var repositoryCandidates []*art.Candidate + repositorySelectors := r.ScopeSelectors["repository"] + if len(repositorySelectors) < 1 { + continue + } + repositorySelector := repositorySelectors[0] + selector, err := index.Get(repositorySelector.Kind, repositorySelector.Decoration, + repositorySelector.Pattern) + if err != nil { + return false, err + } + repositoryCandidates, err = selector.Select(cands) + if err != nil { + return false, err + } + if len(repositoryCandidates) == 0 { + continue + } + + // match tag according to the tag selectors + var tagCandidates []*art.Candidate + tagSelectors := r.TagSelectors + if len(tagSelectors) < 0 { + continue + } + tagSelector := r.TagSelectors[0] + selector, err = index.Get(tagSelector.Kind, tagSelector.Decoration, + tagSelector.Pattern) + if err != nil { + return false, err + } + tagCandidates, err = selector.Select(cands) + if err != nil { + return false, err + } + if len(tagCandidates) == 0 { + continue + } + + return true, nil + } + return false, nil +} + +func (rm *Matcher) getImmutableRules() error { + rules, err := immutabletag.ImmuCtr.ListImmutableRules(rm.pid) + if err != nil { + return err + } + rm.rules = rules + return nil +} + +// NewRuleMatcher ... 
+func NewRuleMatcher(pid int64) match.ImmutableTagMatcher { + return &Matcher{ + pid: pid, + } +} diff --git a/src/pkg/immutabletag/match/rule/match_test.go b/src/pkg/immutabletag/match/rule/match_test.go new file mode 100644 index 000000000..05992e3fe --- /dev/null +++ b/src/pkg/immutabletag/match/rule/match_test.go @@ -0,0 +1,162 @@ +package rule + +import ( + "github.com/goharbor/harbor/src/common/utils/test" + "github.com/goharbor/harbor/src/pkg/art" + "github.com/goharbor/harbor/src/pkg/immutabletag" + "github.com/goharbor/harbor/src/pkg/immutabletag/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +// MatchTestSuite ... +type MatchTestSuite struct { + suite.Suite + t *testing.T + assert *assert.Assertions + require *require.Assertions + ctr immutabletag.APIController + ruleID int64 + ruleID2 int64 +} + +// SetupSuite ... +func (s *MatchTestSuite) SetupSuite() { + test.InitDatabaseFromEnv() + s.t = s.T() + s.assert = assert.New(s.t) + s.require = require.New(s.t) + s.ctr = immutabletag.ImmuCtr +} + +func (s *MatchTestSuite) TestImmuMatch() { + rule := &model.Metadata{ + ID: 1, + ProjectID: 2, + Priority: 1, + Template: "latestPushedK", + Action: "immuablity", + TagSelectors: []*model.Selector{ + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*model.Selector{ + "repository": { + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "redis", + }, + }, + }, + } + + rule2 := &model.Metadata{ + ID: 1, + ProjectID: 2, + Priority: 1, + Template: "latestPushedK", + Action: "immuablity", + TagSelectors: []*model.Selector{ + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "**", + }, + }, + ScopeSelectors: map[string][]*model.Selector{ + "repository": { + { + Kind: "doublestar", + Decoration: "matches", + Pattern: "mysql", + }, + }, + }, + } + + id, err := 
s.ctr.CreateImmutableRule(rule) + s.ruleID = id + s.require.NotNil(err) + + id, err = s.ctr.CreateImmutableRule(rule2) + s.ruleID2 = id + s.require.NotNil(err) + + match := NewRuleMatcher(2) + + c1 := art.Candidate{ + NamespaceID: 2, + Namespace: "immutable", + Repository: "redis", + Tag: "release-1.10", + Kind: art.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1", "label4", "label5"}, + } + isMatch, err := match.Match(c1) + s.require.Equal(isMatch, true) + s.require.Nil(err) + + c2 := art.Candidate{ + NamespaceID: 2, + Namespace: "immutable", + Repository: "redis", + Tag: "1.10", + Kind: art.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1", "label4", "label5"}, + } + isMatch, err = match.Match(c2) + s.require.Equal(isMatch, false) + s.require.Nil(err) + + c3 := art.Candidate{ + NamespaceID: 2, + Namespace: "immutable", + Repository: "mysql", + Tag: "9.4.8", + Kind: art.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1"}, + } + isMatch, err = match.Match(c3) + s.require.Equal(isMatch, true) + s.require.Nil(err) + + c4 := art.Candidate{ + NamespaceID: 2, + Namespace: "immutable", + Repository: "hello", + Tag: "world", + Kind: art.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1"}, + } + isMatch, err = match.Match(c4) + s.require.Equal(isMatch, false) + s.require.Nil(err) +} + +// TearDownSuite clears env for test suite +func (s *MatchTestSuite) TearDownSuite() { + err := s.ctr.DeleteImmutableRule(s.ruleID) + require.NoError(s.T(), err, "delete immutable") + + err = s.ctr.DeleteImmutableRule(s.ruleID2) + require.NoError(s.T(), err, "delete immutable") +} diff --git 
a/src/pkg/immutabletag/model/rule.go b/src/pkg/immutabletag/model/rule.go new file mode 100644 index 000000000..0795abe6c --- /dev/null +++ b/src/pkg/immutabletag/model/rule.go @@ -0,0 +1,64 @@ +package model + +import ( + "github.com/astaxie/beego/validation" +) + +// Metadata of the immutable rule +type Metadata struct { + // UUID of rule + ID int64 `json:"id"` + + // ProjectID of project + ProjectID int64 `json:"project_id"` + + // Disabled rule + Disabled bool `json:"disabled"` + + // Priority of rule when doing calculating + Priority int `json:"priority"` + + // Action of the rule performs + // "immutable" + Action string `json:"action" valid:"Required"` + + // Template ID + Template string `json:"template" valid:"Required"` + + // TagSelectors attached to the rule for filtering tags + TagSelectors []*Selector `json:"tag_selectors" valid:"Required"` + + // Selector attached to the rule for filtering scope (e.g: repositories or namespaces) + ScopeSelectors map[string][]*Selector `json:"scope_selectors" valid:"Required"` +} + +// Valid Valid +func (m *Metadata) Valid(v *validation.Validation) { + for _, ts := range m.TagSelectors { + if pass, _ := v.Valid(ts); !pass { + return + } + } + for _, ss := range m.ScopeSelectors { + for _, s := range ss { + if pass, _ := v.Valid(s); !pass { + return + } + } + } +} + +// Selector to narrow down the list +type Selector struct { + // Kind of the selector + // "doublestar" or "label" + Kind string `json:"kind" valid:"Required;Match(doublestar)"` + + // Decorated the selector + // for "doublestar" : "matching" and "excluding" + // for "label" : "with" and "without" + Decoration string `json:"decoration" valid:"Required"` + + // Param for the selector + Pattern string `json:"pattern" valid:"Required"` +} diff --git a/src/pkg/q/query.go b/src/pkg/q/query.go new file mode 100644 index 000000000..74c8e3da9 --- /dev/null +++ b/src/pkg/q/query.go @@ -0,0 +1,25 @@ +// Copyright Project Harbor Authors +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package q + +// Query parameters +type Query struct { + // Page number + PageNumber int64 + // Page size + PageSize int64 + // List of key words + Keywords map[string]interface{} +} diff --git a/src/pkg/retention/dep/client.go b/src/pkg/retention/dep/client.go index c51d427e8..871b8a924 100644 --- a/src/pkg/retention/dep/client.go +++ b/src/pkg/retention/dep/client.go @@ -21,8 +21,8 @@ import ( "github.com/goharbor/harbor/src/common/http/modifier/auth" "github.com/goharbor/harbor/src/jobservice/config" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/clients/core" - "github.com/goharbor/harbor/src/pkg/retention/res" ) // DefaultClient for the retention @@ -33,30 +33,30 @@ type Client interface { // Get the tag candidates under the repository // // Arguments: - // repo *res.Repository : repository info + // repo *art.Repository : repository info // // Returns: - // []*res.Candidate : candidates returned + // []*art.Candidate : candidates returned // error : common error if any errors occurred - GetCandidates(repo *res.Repository) ([]*res.Candidate, error) + GetCandidates(repo *art.Repository) ([]*art.Candidate, error) // Delete the given repository // // Arguments: - // repo *res.Repository : repository info + // repo *art.Repository : repository info // // Returns: // error : common error if any errors occurred - DeleteRepository(repo *res.Repository) error + DeleteRepository(repo *art.Repository) error 
// Delete the specified candidate // // Arguments: - // candidate *res.Candidate : the deleting candidate + // candidate *art.Candidate : the deleting candidate // // Returns: // error : common error if any errors occurred - Delete(candidate *res.Candidate) error + Delete(candidate *art.Candidate) error } // NewClient new a basic client @@ -88,13 +88,13 @@ type basicClient struct { } // GetCandidates gets the tag candidates under the repository -func (bc *basicClient) GetCandidates(repository *res.Repository) ([]*res.Candidate, error) { +func (bc *basicClient) GetCandidates(repository *art.Repository) ([]*art.Candidate, error) { if repository == nil { return nil, errors.New("repository is nil") } - candidates := make([]*res.Candidate, 0) + candidates := make([]*art.Candidate, 0) switch repository.Kind { - case res.Image: + case art.Image: images, err := bc.coreClient.ListAllImages(repository.Namespace, repository.Name) if err != nil { return nil, err @@ -104,8 +104,8 @@ func (bc *basicClient) GetCandidates(repository *res.Repository) ([]*res.Candida for _, label := range image.Labels { labels = append(labels, label.Name) } - candidate := &res.Candidate{ - Kind: res.Image, + candidate := &art.Candidate{ + Kind: art.Image, Namespace: repository.Namespace, Repository: repository.Name, Tag: image.Name, @@ -118,7 +118,7 @@ func (bc *basicClient) GetCandidates(repository *res.Repository) ([]*res.Candida candidates = append(candidates, candidate) } /* - case res.Chart: + case art.Chart: charts, err := bc.coreClient.ListAllCharts(repository.Namespace, repository.Name) if err != nil { return nil, err @@ -128,8 +128,8 @@ func (bc *basicClient) GetCandidates(repository *res.Repository) ([]*res.Candida for _, label := range chart.Labels { labels = append(labels, label.Name) } - candidate := &res.Candidate{ - Kind: res.Chart, + candidate := &art.Candidate{ + Kind: art.Chart, Namespace: repository.Namespace, Repository: repository.Name, Tag: chart.Name, @@ -148,15 +148,15 @@ 
func (bc *basicClient) GetCandidates(repository *res.Repository) ([]*res.Candida } // DeleteRepository deletes the specified repository -func (bc *basicClient) DeleteRepository(repo *res.Repository) error { +func (bc *basicClient) DeleteRepository(repo *art.Repository) error { if repo == nil { return errors.New("repository is nil") } switch repo.Kind { - case res.Image: + case art.Image: return bc.coreClient.DeleteImageRepository(repo.Namespace, repo.Name) /* - case res.Chart: + case art.Chart: return bc.coreClient.DeleteChartRepository(repo.Namespace, repo.Name) */ default: @@ -165,15 +165,15 @@ func (bc *basicClient) DeleteRepository(repo *res.Repository) error { } // Deletes the specified candidate -func (bc *basicClient) Delete(candidate *res.Candidate) error { +func (bc *basicClient) Delete(candidate *art.Candidate) error { if candidate == nil { return errors.New("candidate is nil") } switch candidate.Kind { - case res.Image: + case art.Image: return bc.coreClient.DeleteImage(candidate.Namespace, candidate.Repository, candidate.Tag) /* - case res.Chart: + case art.Chart: return bc.coreClient.DeleteChart(candidate.Namespace, candidate.Repository, candidate.Tag) */ default: diff --git a/src/pkg/retention/dep/client_test.go b/src/pkg/retention/dep/client_test.go index 071cc230c..90c0e38c8 100644 --- a/src/pkg/retention/dep/client_test.go +++ b/src/pkg/retention/dep/client_test.go @@ -21,7 +21,7 @@ import ( jmodels "github.com/goharbor/harbor/src/common/job/models" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/jobservice/job" - "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/testing/clients" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -73,33 +73,33 @@ type clientTestSuite struct { func (c *clientTestSuite) TestGetCandidates() { client := &basicClient{} client.coreClient = &fakeCoreClient{} - var repository *res.Repository 
+ var repository *art.Repository // nil repository candidates, err := client.GetCandidates(repository) require.NotNil(c.T(), err) // image repository - repository = &res.Repository{} - repository.Kind = res.Image + repository = &art.Repository{} + repository.Kind = art.Image repository.Namespace = "library" repository.Name = "hello-world" candidates, err = client.GetCandidates(repository) require.Nil(c.T(), err) assert.Equal(c.T(), 1, len(candidates)) - assert.Equal(c.T(), res.Image, candidates[0].Kind) + assert.Equal(c.T(), art.Image, candidates[0].Kind) assert.Equal(c.T(), "library", candidates[0].Namespace) assert.Equal(c.T(), "hello-world", candidates[0].Repository) assert.Equal(c.T(), "latest", candidates[0].Tag) /* // chart repository - repository.Kind = res.Chart + repository.Kind = art.Chart repository.Namespace = "goharbor" repository.Name = "harbor" candidates, err = client.GetCandidates(repository) require.Nil(c.T(), err) assert.Equal(c.T(), 1, len(candidates)) - assert.Equal(c.T(), res.Chart, candidates[0].Kind) + assert.Equal(c.T(), art.Chart, candidates[0].Kind) assert.Equal(c.T(), "goharbor", candidates[0].Namespace) assert.Equal(c.T(), "1.0", candidates[0].Tag) */ @@ -109,20 +109,20 @@ func (c *clientTestSuite) TestDelete() { client := &basicClient{} client.coreClient = &fakeCoreClient{} - var candidate *res.Candidate + var candidate *art.Candidate // nil candidate err := client.Delete(candidate) require.NotNil(c.T(), err) // image - candidate = &res.Candidate{} - candidate.Kind = res.Image + candidate = &art.Candidate{} + candidate.Kind = art.Image err = client.Delete(candidate) require.Nil(c.T(), err) /* // chart - candidate.Kind = res.Chart + candidate.Kind = art.Chart err = client.Delete(candidate) require.Nil(c.T(), err) */ diff --git a/src/pkg/retention/job.go b/src/pkg/retention/job.go index 4839b7002..0944aa0c1 100644 --- a/src/pkg/retention/job.go +++ b/src/pkg/retention/job.go @@ -23,10 +23,10 @@ import ( 
"github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/dep" "github.com/goharbor/harbor/src/pkg/retention/policy" "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/olekukonko/tablewriter" "github.com/pkg/errors" ) @@ -116,7 +116,7 @@ func (pj *Job) Run(ctx job.Context, params job.Parameters) error { return saveRetainNum(ctx, results, allCandidates) } -func saveRetainNum(ctx job.Context, retained []*res.Result, allCandidates []*res.Candidate) error { +func saveRetainNum(ctx job.Context, retained []*art.Result, allCandidates []*art.Candidate) error { var delNum int for _, r := range retained { if r.Error == nil { @@ -138,7 +138,7 @@ func saveRetainNum(ctx job.Context, retained []*res.Result, allCandidates []*res return nil } -func logResults(logger logger.Interface, all []*res.Candidate, results []*res.Result) { +func logResults(logger logger.Interface, all []*art.Candidate, results []*art.Result) { hash := make(map[string]error, len(results)) for _, r := range results { if r.Target != nil { @@ -146,7 +146,7 @@ func logResults(logger logger.Interface, all []*res.Candidate, results []*res.Re } } - op := func(art *res.Candidate) string { + op := func(art *art.Candidate) string { if e, exists := hash[art.Hash()]; exists { if e != nil { return actionMarkError @@ -194,7 +194,7 @@ func logResults(logger logger.Interface, all []*res.Candidate, results []*res.Re } } -func arn(art *res.Candidate) string { +func arn(art *art.Candidate) string { return fmt.Sprintf("%s/%s:%s", art.Namespace, art.Repository, art.Tag) } @@ -237,7 +237,7 @@ func getParamDryRun(params job.Parameters) (bool, error) { return dryRun, nil } -func getParamRepo(params job.Parameters) (*res.Repository, error) { +func getParamRepo(params job.Parameters) (*art.Repository, error) { v, ok := 
params[ParamRepo] if !ok { return nil, errors.Errorf("missing parameter: %s", ParamRepo) @@ -248,7 +248,7 @@ func getParamRepo(params job.Parameters) (*res.Repository, error) { return nil, errors.Errorf("invalid parameter: %s", ParamRepo) } - repo := &res.Repository{} + repo := &art.Repository{} if err := repo.FromJSON(repoJSON); err != nil { return nil, errors.Wrap(err, "parse repository from JSON") } diff --git a/src/pkg/retention/job_test.go b/src/pkg/retention/job_test.go index 6d960ae64..27ab7410d 100644 --- a/src/pkg/retention/job_test.go +++ b/src/pkg/retention/job_test.go @@ -22,14 +22,14 @@ import ( "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/art" + "github.com/goharbor/harbor/src/pkg/art/selectors/doublestar" "github.com/goharbor/harbor/src/pkg/retention/dep" "github.com/goharbor/harbor/src/pkg/retention/policy" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps" - "github.com/goharbor/harbor/src/pkg/retention/res" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -60,10 +60,10 @@ func (suite *JobTestSuite) TearDownSuite() { func (suite *JobTestSuite) TestRunSuccess() { params := make(job.Parameters) params[ParamDryRun] = false - repository := &res.Repository{ + repository := &art.Repository{ Namespace: "library", Name: "harbor", - Kind: res.Image, + Kind: art.Image, } repoJSON, err := repository.ToJSON() require.Nil(suite.T(), err) @@ -112,8 +112,8 @@ func (suite *JobTestSuite) TestRunSuccess() { type fakeRetentionClient struct{} // GetCandidates ... 
-func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { - return []*res.Candidate{ +func (frc *fakeRetentionClient) GetCandidates(repo *art.Repository) ([]*art.Candidate, error) { + return []*art.Candidate{ { Namespace: "library", Repository: "harbor", @@ -140,12 +140,12 @@ func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Cand } // Delete ... -func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { +func (frc *fakeRetentionClient) Delete(candidate *art.Candidate) error { return nil } // SubmitTask ... -func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { +func (frc *fakeRetentionClient) DeleteRepository(repo *art.Repository) error { return nil } diff --git a/src/pkg/retention/launcher.go b/src/pkg/retention/launcher.go index c9f6d4655..09d264ee7 100644 --- a/src/pkg/retention/launcher.go +++ b/src/pkg/retention/launcher.go @@ -19,7 +19,7 @@ import ( "time" "github.com/goharbor/harbor/src/jobservice/job" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index" + "github.com/goharbor/harbor/src/pkg/art/selectors/index" cjob "github.com/goharbor/harbor/src/common/job" "github.com/goharbor/harbor/src/common/job/models" @@ -27,12 +27,12 @@ import ( "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/project" "github.com/goharbor/harbor/src/pkg/repository" "github.com/goharbor/harbor/src/pkg/retention/policy" "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" "github.com/goharbor/harbor/src/pkg/retention/q" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/pkg/errors" ) @@ -84,7 +84,7 @@ func NewLauncher(projectMgr project.Manager, repositoryMgr repository.Manager, type jobData struct { TaskID int64 - Repository res.Repository + Repository art.Repository JobName 
string JobParams map[string]interface{} } @@ -111,9 +111,9 @@ func (l *launcher) Launch(ply *policy.Metadata, executionID int64, isDryRun bool if scope == nil { return 0, launcherError(fmt.Errorf("the scope of policy is nil")) } - repositoryRules := make(map[res.Repository]*lwp.Metadata, 0) + repositoryRules := make(map[art.Repository]*lwp.Metadata, 0) level := scope.Level - var allProjects []*res.Candidate + var allProjects []*art.Candidate var err error if level == "system" { // get projects @@ -144,12 +144,12 @@ func (l *launcher) Launch(ply *policy.Metadata, executionID int64, isDryRun bool } } case "project": - projectCandidates = append(projectCandidates, &res.Candidate{ + projectCandidates = append(projectCandidates, &art.Candidate{ NamespaceID: scope.Reference, }) } - var repositoryCandidates []*res.Candidate + var repositoryCandidates []*art.Candidate // get repositories of projects for _, projectCandidate := range projectCandidates { repositories, err := getRepositories(l.projectMgr, l.repositoryMgr, projectCandidate.NamespaceID, l.chartServerEnabled) @@ -174,7 +174,7 @@ func (l *launcher) Launch(ply *policy.Metadata, executionID int64, isDryRun bool } for _, repositoryCandidate := range repositoryCandidates { - reposit := res.Repository{ + reposit := art.Repository{ Namespace: repositoryCandidate.Namespace, Name: repositoryCandidate.Repository, Kind: repositoryCandidate.Kind, @@ -214,7 +214,7 @@ func (l *launcher) Launch(ply *policy.Metadata, executionID int64, isDryRun bool return int64(len(jobDatas)), nil } -func createJobs(repositoryRules map[res.Repository]*lwp.Metadata, isDryRun bool) ([]*jobData, error) { +func createJobs(repositoryRules map[art.Repository]*lwp.Metadata, isDryRun bool) ([]*jobData, error) { jobDatas := []*jobData{} for repository, policy := range repositoryRules { jobData := &jobData{ @@ -320,14 +320,14 @@ func launcherError(err error) error { return errors.Wrap(err, "launcher") } -func getProjects(projectMgr project.Manager) 
([]*res.Candidate, error) { +func getProjects(projectMgr project.Manager) ([]*art.Candidate, error) { projects, err := projectMgr.List() if err != nil { return nil, err } - var candidates []*res.Candidate + var candidates []*art.Candidate for _, pro := range projects { - candidates = append(candidates, &res.Candidate{ + candidates = append(candidates, &art.Candidate{ NamespaceID: pro.ProjectID, Namespace: pro.Name, }) @@ -336,8 +336,8 @@ func getProjects(projectMgr project.Manager) ([]*res.Candidate, error) { } func getRepositories(projectMgr project.Manager, repositoryMgr repository.Manager, - projectID int64, chartServerEnabled bool) ([]*res.Candidate, error) { - var candidates []*res.Candidate + projectID int64, chartServerEnabled bool) ([]*art.Candidate, error) { + var candidates []*art.Candidate /* pro, err := projectMgr.Get(projectID) if err != nil { @@ -351,7 +351,7 @@ func getRepositories(projectMgr project.Manager, repositoryMgr repository.Manage } for _, r := range imageRepositories { namespace, repo := utils.ParseRepository(r.Name) - candidates = append(candidates, &res.Candidate{ + candidates = append(candidates, &art.Candidate{ Namespace: namespace, Repository: repo, Kind: "image", @@ -366,7 +366,7 @@ func getRepositories(projectMgr project.Manager, repositoryMgr repository.Manage return nil, err } for _, r := range chartRepositories { - candidates = append(candidates, &res.Candidate{ + candidates = append(candidates, &art.Candidate{ Namespace: pro.Name, Repository: r.Name, Kind: "chart", diff --git a/src/pkg/retention/launcher_test.go b/src/pkg/retention/launcher_test.go index c63b7bf28..3048b15a9 100644 --- a/src/pkg/retention/launcher_test.go +++ b/src/pkg/retention/launcher_test.go @@ -21,12 +21,12 @@ import ( "github.com/goharbor/harbor/src/chartserver" "github.com/goharbor/harbor/src/common/job" "github.com/goharbor/harbor/src/common/models" + _ "github.com/goharbor/harbor/src/pkg/art/selectors/doublestar" 
"github.com/goharbor/harbor/src/pkg/project" "github.com/goharbor/harbor/src/pkg/repository" "github.com/goharbor/harbor/src/pkg/retention/policy" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" "github.com/goharbor/harbor/src/pkg/retention/q" - _ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" hjob "github.com/goharbor/harbor/src/testing/job" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/src/pkg/retention/policy/action/index/index_test.go b/src/pkg/retention/policy/action/index/index_test.go index f9d4f57e5..a873fed3c 100644 --- a/src/pkg/retention/policy/action/index/index_test.go +++ b/src/pkg/retention/policy/action/index/index_test.go @@ -18,8 +18,8 @@ import ( "testing" "time" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -29,7 +29,7 @@ import ( type IndexTestSuite struct { suite.Suite - candidates []*res.Candidate + candidates []*art.Candidate } // TestIndexEntry is entry of IndexTestSuite @@ -41,7 +41,7 @@ func TestIndexEntry(t *testing.T) { func (suite *IndexTestSuite) SetupSuite() { Register("fakeAction", newFakePerformer) - suite.candidates = []*res.Candidate{{ + suite.candidates = []*art.Candidate{{ Namespace: "library", Repository: "harbor", Kind: "image", @@ -77,9 +77,9 @@ type fakePerformer struct { } // Perform the artifacts -func (p *fakePerformer) Perform(candidates []*res.Candidate) (results []*res.Result, err error) { +func (p *fakePerformer) Perform(candidates []*art.Candidate) (results []*art.Result, err error) { for _, c := range candidates { - results = append(results, &res.Result{ + results = append(results, &art.Result{ Target: c, }) } diff --git a/src/pkg/retention/policy/action/performer.go b/src/pkg/retention/policy/action/performer.go 
index 72d34d612..2461d0945 100644 --- a/src/pkg/retention/policy/action/performer.go +++ b/src/pkg/retention/policy/action/performer.go @@ -15,8 +15,8 @@ package action import ( + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/dep" - "github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -29,12 +29,12 @@ type Performer interface { // Perform the action // // Arguments: - // candidates []*res.Candidate : the targets to perform + // candidates []*art.Candidate : the targets to perform // // Returns: - // []*res.Result : result infos + // []*art.Result : result infos // error : common error if any errors occurred - Perform(candidates []*res.Candidate) ([]*res.Result, error) + Perform(candidates []*art.Candidate) ([]*art.Result, error) } // PerformerFactory is factory method for creating Performer @@ -42,13 +42,13 @@ type PerformerFactory func(params interface{}, isDryRun bool) Performer // retainAction make sure all the candidates will be retained and others will be cleared type retainAction struct { - all []*res.Candidate + all []*art.Candidate // Indicate if it is a dry run isDryRun bool } // Perform the action -func (ra *retainAction) Perform(candidates []*res.Candidate) (results []*res.Result, err error) { +func (ra *retainAction) Perform(candidates []*art.Candidate) (results []*art.Result, err error) { retained := make(map[string]bool) for _, c := range candidates { retained[c.Hash()] = true @@ -56,14 +56,14 @@ func (ra *retainAction) Perform(candidates []*res.Candidate) (results []*res.Res // start to delete if len(ra.all) > 0 { - for _, art := range ra.all { - if _, ok := retained[art.Hash()]; !ok { - result := &res.Result{ - Target: art, + for _, c := range ra.all { + if _, ok := retained[c.Hash()]; !ok { + result := &art.Result{ + Target: c, } if !ra.isDryRun { - if err := dep.DefaultClient.Delete(art); err != nil { + if err := dep.DefaultClient.Delete(c); err != nil { result.Error = err } } @@ -79,7 +79,7 @@ 
func (ra *retainAction) Perform(candidates []*res.Candidate) (results []*res.Res // NewRetainAction is factory method for RetainAction func NewRetainAction(params interface{}, isDryRun bool) Performer { if params != nil { - if all, ok := params.([]*res.Candidate); ok { + if all, ok := params.([]*art.Candidate); ok { return &retainAction{ all: all, isDryRun: isDryRun, @@ -88,7 +88,7 @@ func NewRetainAction(params interface{}, isDryRun bool) Performer { } return &retainAction{ - all: make([]*res.Candidate, 0), + all: make([]*art.Candidate, 0), isDryRun: isDryRun, } } diff --git a/src/pkg/retention/policy/action/performer_test.go b/src/pkg/retention/policy/action/performer_test.go index 0f07c0433..868bb4c93 100644 --- a/src/pkg/retention/policy/action/performer_test.go +++ b/src/pkg/retention/policy/action/performer_test.go @@ -18,8 +18,8 @@ import ( "testing" "time" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/dep" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -31,7 +31,7 @@ type TestPerformerSuite struct { suite.Suite oldClient dep.Client - all []*res.Candidate + all []*art.Candidate } // TestPerformer is the entry of the TestPerformerSuite @@ -41,7 +41,7 @@ func TestPerformer(t *testing.T) { // SetupSuite ... func (suite *TestPerformerSuite) SetupSuite() { - suite.all = []*res.Candidate{ + suite.all = []*art.Candidate{ { Namespace: "library", Repository: "harbor", @@ -77,7 +77,7 @@ func (suite *TestPerformerSuite) TestPerform() { all: suite.all, } - candidates := []*res.Candidate{ + candidates := []*art.Candidate{ { Namespace: "library", Repository: "harbor", @@ -100,16 +100,16 @@ func (suite *TestPerformerSuite) TestPerform() { type fakeRetentionClient struct{} // GetCandidates ... 
-func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { +func (frc *fakeRetentionClient) GetCandidates(repo *art.Repository) ([]*art.Candidate, error) { return nil, errors.New("not implemented") } // Delete ... -func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { +func (frc *fakeRetentionClient) Delete(candidate *art.Candidate) error { return nil } // DeleteRepository ... -func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { +func (frc *fakeRetentionClient) DeleteRepository(repo *art.Repository) error { panic("implement me") } diff --git a/src/pkg/retention/policy/alg/or/processor.go b/src/pkg/retention/policy/alg/or/processor.go index 623e4f050..a940299dd 100644 --- a/src/pkg/retention/policy/alg/or/processor.go +++ b/src/pkg/retention/policy/alg/or/processor.go @@ -18,10 +18,10 @@ import ( "sync" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/alg" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/pkg/errors" ) @@ -29,7 +29,7 @@ import ( type processor struct { // keep evaluator and its related selector if existing // attentions here, the selectors can be empty/nil, that means match all "**" - evaluators map[*rule.Evaluator][]res.Selector + evaluators map[*rule.Evaluator][]art.Selector // action performer performers map[string]action.Performer } @@ -37,7 +37,7 @@ type processor struct { // New processor func New(parameters []*alg.Parameter) alg.Processor { p := &processor{ - evaluators: make(map[*rule.Evaluator][]res.Selector), + evaluators: make(map[*rule.Evaluator][]art.Selector), performers: make(map[string]action.Performer), } @@ -59,10 +59,10 @@ func New(parameters []*alg.Parameter) alg.Processor { } // Process the candidates with the rules 
-func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) { +func (p *processor) Process(artifacts []*art.Candidate) ([]*art.Result, error) { if len(artifacts) == 0 { log.Debug("no artifacts to retention") - return make([]*res.Result, 0), nil + return make([]*art.Result, 0), nil } var ( @@ -75,7 +75,7 @@ func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) { // for sync type chanItem struct { action string - processed []*res.Candidate + processed []*art.Candidate } resChan := make(chan *chanItem, 1) @@ -124,9 +124,9 @@ func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) { for eva, selectors := range p.evaluators { var evaluator = *eva - go func(evaluator rule.Evaluator, selectors []res.Selector) { + go func(evaluator rule.Evaluator, selectors []art.Selector) { var ( - processed []*res.Candidate + processed []*art.Candidate err error ) @@ -173,7 +173,7 @@ func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) { return nil, err } - results := make([]*res.Result, 0) + results := make([]*art.Result, 0) // Perform actions for act, hash := range processedCandidates { var attachedErr error @@ -192,7 +192,7 @@ func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) { if attachedErr != nil { for _, c := range cl { - results = append(results, &res.Result{ + results = append(results, &art.Result{ Target: c, Error: attachedErr, }) @@ -203,10 +203,10 @@ func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) { return results, nil } -type cHash map[string]*res.Candidate +type cHash map[string]*art.Candidate -func (ch cHash) toList() []*res.Candidate { - l := make([]*res.Candidate, 0) +func (ch cHash) toList() []*art.Candidate { + l := make([]*art.Candidate, 0) for _, v := range ch { l = append(l, v) diff --git a/src/pkg/retention/policy/alg/or/processor_test.go b/src/pkg/retention/policy/alg/or/processor_test.go index 
8d09966e5..54e5233f5 100644 --- a/src/pkg/retention/policy/alg/or/processor_test.go +++ b/src/pkg/retention/policy/alg/or/processor_test.go @@ -19,6 +19,9 @@ import ( "testing" "time" + "github.com/goharbor/harbor/src/pkg/art" + "github.com/goharbor/harbor/src/pkg/art/selectors/doublestar" + "github.com/goharbor/harbor/src/pkg/art/selectors/label" "github.com/goharbor/harbor/src/pkg/retention/dep" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/alg" @@ -26,9 +29,6 @@ import ( "github.com/goharbor/harbor/src/pkg/retention/policy/rule/always" "github.com/goharbor/harbor/src/pkg/retention/policy/rule/lastx" "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps" - "github.com/goharbor/harbor/src/pkg/retention/res" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -38,7 +38,7 @@ import ( type ProcessorTestSuite struct { suite.Suite - all []*res.Candidate + all []*art.Candidate oldClient dep.Client } @@ -50,7 +50,7 @@ func TestProcessor(t *testing.T) { // SetupSuite ... 
func (suite *ProcessorTestSuite) SetupSuite() { - suite.all = []*res.Candidate{ + suite.all = []*art.Candidate{ { Namespace: "library", Repository: "harbor", @@ -90,7 +90,7 @@ func (suite *ProcessorTestSuite) TestProcess() { lastxParams[lastx.ParameterX] = 10 params = append(params, &alg.Parameter{ Evaluator: lastx.New(lastxParams), - Selectors: []res.Selector{ + Selectors: []art.Selector{ doublestar.New(doublestar.Matches, "*dev*"), label.New(label.With, "L1,L2"), }, @@ -101,7 +101,7 @@ func (suite *ProcessorTestSuite) TestProcess() { latestKParams[latestps.ParameterK] = 10 params = append(params, &alg.Parameter{ Evaluator: latestps.New(latestKParams), - Selectors: []res.Selector{ + Selectors: []art.Selector{ label.New(label.With, "L3"), }, Performer: perf, @@ -131,7 +131,7 @@ func (suite *ProcessorTestSuite) TestProcess2() { alwaysParams := make(map[string]rule.Parameter) params = append(params, &alg.Parameter{ Evaluator: always.New(alwaysParams), - Selectors: []res.Selector{ + Selectors: []art.Selector{ doublestar.New(doublestar.Matches, "latest"), label.New(label.With, ""), }, @@ -163,16 +163,16 @@ func (suite *ProcessorTestSuite) TestProcess2() { type fakeRetentionClient struct{} // GetCandidates ... -func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { +func (frc *fakeRetentionClient) GetCandidates(repo *art.Repository) ([]*art.Candidate, error) { return nil, errors.New("not implemented") } // Delete ... -func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { +func (frc *fakeRetentionClient) Delete(candidate *art.Candidate) error { return nil } // DeleteRepository ... 
-func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { +func (frc *fakeRetentionClient) DeleteRepository(repo *art.Repository) error { panic("implement me") } diff --git a/src/pkg/retention/policy/alg/processor.go b/src/pkg/retention/policy/alg/processor.go index 4f7103a5f..a057b6fee 100644 --- a/src/pkg/retention/policy/alg/processor.go +++ b/src/pkg/retention/policy/alg/processor.go @@ -15,9 +15,9 @@ package alg import ( + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) // Processor processing the whole policy targeting a repository. @@ -27,12 +27,12 @@ type Processor interface { // Process the artifact candidates // // Arguments: - // artifacts []*res.Candidate : process the retention candidates + // artifacts []*art.Candidate : process the retention candidates // // Returns: - // []*res.Result : the processed results + // []*art.Result : the processed results // error : common error object if any errors occurred - Process(artifacts []*res.Candidate) ([]*res.Result, error) + Process(artifacts []*art.Candidate) ([]*art.Result, error) } // Parameter for constructing a processor @@ -42,7 +42,7 @@ type Parameter struct { Evaluator rule.Evaluator // Selectors for the rule - Selectors []res.Selector + Selectors []art.Selector // Performer for the rule evaluator Performer action.Performer diff --git a/src/pkg/retention/policy/builder.go b/src/pkg/retention/policy/builder.go index 88443fb6b..59884c86c 100644 --- a/src/pkg/retention/policy/builder.go +++ b/src/pkg/retention/policy/builder.go @@ -21,13 +21,13 @@ import ( index3 "github.com/goharbor/harbor/src/pkg/retention/policy/alg/index" - index2 "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index" + index2 "github.com/goharbor/harbor/src/pkg/art/selectors/index" 
"github.com/goharbor/harbor/src/pkg/retention/policy/rule/index" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/alg" "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/pkg/errors" ) @@ -46,7 +46,7 @@ type Builder interface { } // NewBuilder news a basic builder -func NewBuilder(all []*res.Candidate) Builder { +func NewBuilder(all []*art.Candidate) Builder { return &basicBuilder{ allCandidates: all, } @@ -54,7 +54,7 @@ func NewBuilder(all []*res.Candidate) Builder { // basicBuilder is default implementation of Builder interface type basicBuilder struct { - allCandidates []*res.Candidate + allCandidates []*art.Candidate } // Build policy processor from the raw policy @@ -76,7 +76,7 @@ func (bb *basicBuilder) Build(policy *lwp.Metadata, isDryRun bool) (alg.Processo return nil, errors.Wrap(err, "get action performer by metadata") } - sl := make([]res.Selector, 0) + sl := make([]art.Selector, 0) for _, s := range r.TagSelectors { sel, err := index2.Get(s.Kind, s.Decoration, s.Pattern) if err != nil { diff --git a/src/pkg/retention/policy/builder_test.go b/src/pkg/retention/policy/builder_test.go index cd12b9494..60ba74e0e 100644 --- a/src/pkg/retention/policy/builder_test.go +++ b/src/pkg/retention/policy/builder_test.go @@ -22,7 +22,7 @@ import ( index2 "github.com/goharbor/harbor/src/pkg/retention/policy/alg/index" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index" + "github.com/goharbor/harbor/src/pkg/art/selectors/index" "github.com/goharbor/harbor/src/pkg/retention/dep" @@ -30,9 +30,9 @@ import ( "github.com/goharbor/harbor/src/pkg/retention/policy/alg/or" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label" + "github.com/goharbor/harbor/src/pkg/art/selectors/label" - "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" + "github.com/goharbor/harbor/src/pkg/art/selectors/doublestar" 
"github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps" @@ -46,7 +46,7 @@ import ( "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" - "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/art" "github.com/stretchr/testify/suite" ) @@ -55,7 +55,7 @@ import ( type TestBuilderSuite struct { suite.Suite - all []*res.Candidate + all []*art.Candidate oldClient dep.Client } @@ -66,7 +66,7 @@ func TestBuilder(t *testing.T) { // SetupSuite prepares the testing content if needed func (suite *TestBuilderSuite) SetupSuite() { - suite.all = []*res.Candidate{ + suite.all = []*art.Candidate{ { NamespaceID: 1, Namespace: "library", @@ -163,21 +163,21 @@ func (suite *TestBuilderSuite) TestBuild() { type fakeRetentionClient struct{} -func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { +func (frc *fakeRetentionClient) DeleteRepository(repo *art.Repository) error { panic("implement me") } // GetCandidates ... -func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { +func (frc *fakeRetentionClient) GetCandidates(repo *art.Repository) ([]*art.Candidate, error) { return nil, errors.New("not implemented") } // Delete ... -func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { +func (frc *fakeRetentionClient) Delete(candidate *art.Candidate) error { return nil } // SubmitTask ... 
-func (frc *fakeRetentionClient) SubmitTask(taskID int64, repository *res.Repository, meta *lwp.Metadata) (string, error) { +func (frc *fakeRetentionClient) SubmitTask(taskID int64, repository *art.Repository, meta *lwp.Metadata) (string, error) { return "", errors.New("not implemented") } diff --git a/src/pkg/retention/policy/rule/always/evaluator.go b/src/pkg/retention/policy/rule/always/evaluator.go index 1cd4f4eb4..7155b5a99 100644 --- a/src/pkg/retention/policy/rule/always/evaluator.go +++ b/src/pkg/retention/policy/rule/always/evaluator.go @@ -15,9 +15,9 @@ package always import ( + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -28,7 +28,7 @@ const ( type evaluator struct{} // Process for the "always" Evaluator simply returns the input with no error -func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { +func (e *evaluator) Process(artifacts []*art.Candidate) ([]*art.Candidate, error) { return artifacts, nil } diff --git a/src/pkg/retention/policy/rule/always/evaluator_test.go b/src/pkg/retention/policy/rule/always/evaluator_test.go index 9e7c53b77..52bb142b9 100644 --- a/src/pkg/retention/policy/rule/always/evaluator_test.go +++ b/src/pkg/retention/policy/rule/always/evaluator_test.go @@ -17,8 +17,8 @@ package always import ( "testing" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -36,7 +36,7 @@ func (e *EvaluatorTestSuite) TestNew() { func (e *EvaluatorTestSuite) TestProcess() { sut := New(rule.Parameters{}) - input := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}} + input := []*art.Candidate{{PushedTime: 0}, {PushedTime: 1}, 
{PushedTime: 2}, {PushedTime: 3}} result, err := sut.Process(input) diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator.go b/src/pkg/retention/policy/rule/dayspl/evaluator.go index 7face2b73..962555257 100644 --- a/src/pkg/retention/policy/rule/dayspl/evaluator.go +++ b/src/pkg/retention/policy/rule/dayspl/evaluator.go @@ -20,9 +20,9 @@ import ( "time" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -41,7 +41,7 @@ type evaluator struct { n int } -func (e *evaluator) Process(artifacts []*res.Candidate) (result []*res.Candidate, err error) { +func (e *evaluator) Process(artifacts []*art.Candidate) (result []*art.Candidate, err error) { minPullTime := time.Now().UTC().Add(time.Duration(-1*24*e.n) * time.Hour).Unix() for _, a := range artifacts { if a.PulledTime >= minPullTime { diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator_test.go b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go index 49a98d2cb..79893b2d2 100644 --- a/src/pkg/retention/policy/rule/dayspl/evaluator_test.go +++ b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go @@ -17,12 +17,12 @@ package dayspl import ( "errors" "fmt" + "github.com/stretchr/testify/assert" "testing" "time" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -54,15 +54,15 @@ func (e *EvaluatorTestSuite) TestNew() { func (e *EvaluatorTestSuite) TestProcess() { now := time.Now().UTC() - data := []*res.Candidate{ - {PulledTime: daysAgo(now, 1)}, - {PulledTime: daysAgo(now, 2)}, - {PulledTime: daysAgo(now, 3)}, - {PulledTime: daysAgo(now, 4)}, - {PulledTime: 
daysAgo(now, 5)}, - {PulledTime: daysAgo(now, 10)}, - {PulledTime: daysAgo(now, 20)}, - {PulledTime: daysAgo(now, 30)}, + data := []*art.Candidate{ + {PulledTime: daysAgo(now, 1, time.Hour)}, + {PulledTime: daysAgo(now, 2, time.Hour)}, + {PulledTime: daysAgo(now, 3, time.Hour)}, + {PulledTime: daysAgo(now, 4, time.Hour)}, + {PulledTime: daysAgo(now, 5, time.Hour)}, + {PulledTime: daysAgo(now, 10, time.Hour)}, + {PulledTime: daysAgo(now, 20, time.Hour)}, + {PulledTime: daysAgo(now, 30, time.Hour)}, } tests := []struct { @@ -71,13 +71,13 @@ func (e *EvaluatorTestSuite) TestProcess() { minPullTime int64 }{ {n: 0, expected: 0, minPullTime: 0}, - {n: 1, expected: 1, minPullTime: daysAgo(now, 1)}, - {n: 2, expected: 2, minPullTime: daysAgo(now, 2)}, - {n: 3, expected: 3, minPullTime: daysAgo(now, 3)}, - {n: 4, expected: 4, minPullTime: daysAgo(now, 4)}, - {n: 5, expected: 5, minPullTime: daysAgo(now, 5)}, - {n: 15, expected: 6, minPullTime: daysAgo(now, 10)}, - {n: 90, expected: 8, minPullTime: daysAgo(now, 30)}, + {n: 1, expected: 1, minPullTime: daysAgo(now, 1, 0)}, + {n: 2, expected: 2, minPullTime: daysAgo(now, 2, 0)}, + {n: 3, expected: 3, minPullTime: daysAgo(now, 3, 0)}, + {n: 4, expected: 4, minPullTime: daysAgo(now, 4, 0)}, + {n: 5, expected: 5, minPullTime: daysAgo(now, 5, 0)}, + {n: 15, expected: 6, minPullTime: daysAgo(now, 10, 0)}, + {n: 90, expected: 8, minPullTime: daysAgo(now, 30, 0)}, } for _, tt := range tests { @@ -120,6 +120,6 @@ func TestEvaluatorSuite(t *testing.T) { suite.Run(t, &EvaluatorTestSuite{}) } -func daysAgo(from time.Time, n int) int64 { - return from.Add(time.Duration(-1*24*n) * time.Hour).Unix() +func daysAgo(from time.Time, n int, offset time.Duration) int64 { + return from.Add(time.Duration(-1*24*n)*time.Hour + offset).Unix() } diff --git a/src/pkg/retention/policy/rule/daysps/evaluator.go b/src/pkg/retention/policy/rule/daysps/evaluator.go index 58cf73d2b..58ee79a57 100644 --- a/src/pkg/retention/policy/rule/daysps/evaluator.go +++ 
b/src/pkg/retention/policy/rule/daysps/evaluator.go @@ -20,9 +20,9 @@ import ( "time" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -41,7 +41,7 @@ type evaluator struct { n int } -func (e *evaluator) Process(artifacts []*res.Candidate) (result []*res.Candidate, err error) { +func (e *evaluator) Process(artifacts []*art.Candidate) (result []*art.Candidate, err error) { minPushTime := time.Now().UTC().Add(time.Duration(-1*24*e.n) * time.Hour).Unix() for _, a := range artifacts { if a.PushedTime >= minPushTime { diff --git a/src/pkg/retention/policy/rule/daysps/evaluator_test.go b/src/pkg/retention/policy/rule/daysps/evaluator_test.go index a40c2c5a2..4a7877f1f 100644 --- a/src/pkg/retention/policy/rule/daysps/evaluator_test.go +++ b/src/pkg/retention/policy/rule/daysps/evaluator_test.go @@ -20,8 +20,8 @@ import ( "testing" "time" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -54,15 +54,15 @@ func (e *EvaluatorTestSuite) TestNew() { func (e *EvaluatorTestSuite) TestProcess() { now := time.Now().UTC() - data := []*res.Candidate{ - {PushedTime: daysAgo(now, 1)}, - {PushedTime: daysAgo(now, 2)}, - {PushedTime: daysAgo(now, 3)}, - {PushedTime: daysAgo(now, 4)}, - {PushedTime: daysAgo(now, 5)}, - {PushedTime: daysAgo(now, 10)}, - {PushedTime: daysAgo(now, 20)}, - {PushedTime: daysAgo(now, 30)}, + data := []*art.Candidate{ + {PushedTime: daysAgo(now, 1, time.Hour)}, + {PushedTime: daysAgo(now, 2, time.Hour)}, + {PushedTime: daysAgo(now, 3, time.Hour)}, + {PushedTime: daysAgo(now, 4, time.Hour)}, + {PushedTime: daysAgo(now, 5, 
time.Hour)}, + {PushedTime: daysAgo(now, 10, time.Hour)}, + {PushedTime: daysAgo(now, 20, time.Hour)}, + {PushedTime: daysAgo(now, 30, time.Hour)}, } tests := []struct { @@ -71,13 +71,13 @@ func (e *EvaluatorTestSuite) TestProcess() { minPushTime int64 }{ {n: 0, expected: 0, minPushTime: 0}, - {n: 1, expected: 1, minPushTime: daysAgo(now, 1)}, - {n: 2, expected: 2, minPushTime: daysAgo(now, 2)}, - {n: 3, expected: 3, minPushTime: daysAgo(now, 3)}, - {n: 4, expected: 4, minPushTime: daysAgo(now, 4)}, - {n: 5, expected: 5, minPushTime: daysAgo(now, 5)}, - {n: 15, expected: 6, minPushTime: daysAgo(now, 10)}, - {n: 90, expected: 8, minPushTime: daysAgo(now, 30)}, + {n: 1, expected: 1, minPushTime: daysAgo(now, 1, 0)}, + {n: 2, expected: 2, minPushTime: daysAgo(now, 2, 0)}, + {n: 3, expected: 3, minPushTime: daysAgo(now, 3, 0)}, + {n: 4, expected: 4, minPushTime: daysAgo(now, 4, 0)}, + {n: 5, expected: 5, minPushTime: daysAgo(now, 5, 0)}, + {n: 15, expected: 6, minPushTime: daysAgo(now, 10, 0)}, + {n: 90, expected: 8, minPushTime: daysAgo(now, 30, 0)}, } for _, tt := range tests { @@ -120,6 +120,6 @@ func TestEvaluatorSuite(t *testing.T) { suite.Run(t, &EvaluatorTestSuite{}) } -func daysAgo(from time.Time, n int) int64 { - return from.Add(time.Duration(-1*24*n) * time.Hour).Unix() +func daysAgo(from time.Time, n int, offset time.Duration) int64 { + return from.Add(time.Duration(-1*24*n)*time.Hour + offset).Unix() } diff --git a/src/pkg/retention/policy/rule/evaluator.go b/src/pkg/retention/policy/rule/evaluator.go index 18e641986..f36d0f344 100644 --- a/src/pkg/retention/policy/rule/evaluator.go +++ b/src/pkg/retention/policy/rule/evaluator.go @@ -14,19 +14,19 @@ package rule -import "github.com/goharbor/harbor/src/pkg/retention/res" +import "github.com/goharbor/harbor/src/pkg/art" // Evaluator defines method of executing rule type Evaluator interface { // Filter the inputs and return the filtered outputs // // Arguments: - // artifacts []*res.Candidate : candidates for 
processing + // artifacts []*art.Candidate : candidates for processing // // Returns: - // []*res.Candidate : matched candidates for next stage + // []*art.Candidate : matched candidates for next stage // error : common error object if any errors occurred - Process(artifacts []*res.Candidate) ([]*res.Candidate, error) + Process(artifacts []*art.Candidate) ([]*art.Candidate, error) // Specify what action is performed to the candidates processed by this evaluator Action() string diff --git a/src/pkg/retention/policy/rule/index/index_test.go b/src/pkg/retention/policy/rule/index/index_test.go index fd8268f18..9f2f1f3ef 100644 --- a/src/pkg/retention/policy/rule/index/index_test.go +++ b/src/pkg/retention/policy/rule/index/index_test.go @@ -22,8 +22,8 @@ import ( "github.com/stretchr/testify/require" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/suite" ) @@ -63,7 +63,7 @@ func (suite *IndexTestSuite) TestGet() { require.NoError(suite.T(), err) require.NotNil(suite.T(), evaluator) - candidates := []*res.Candidate{{ + candidates := []*art.Candidate{{ Namespace: "library", Repository: "harbor", Kind: "image", @@ -102,7 +102,7 @@ type fakeEvaluator struct { } // Process rule -func (e *fakeEvaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { +func (e *fakeEvaluator) Process(artifacts []*art.Candidate) ([]*art.Candidate, error) { return artifacts, nil } diff --git a/src/pkg/retention/policy/rule/lastx/evaluator.go b/src/pkg/retention/policy/rule/lastx/evaluator.go index 7457c0db3..ad0447f65 100644 --- a/src/pkg/retention/policy/rule/lastx/evaluator.go +++ b/src/pkg/retention/policy/rule/lastx/evaluator.go @@ -19,9 +19,9 @@ import ( "time" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" 
"github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -40,7 +40,7 @@ type evaluator struct { } // Process the candidates based on the rule definition -func (e *evaluator) Process(artifacts []*res.Candidate) (retain []*res.Candidate, err error) { +func (e *evaluator) Process(artifacts []*art.Candidate) (retain []*art.Candidate, err error) { cutoff := time.Now().Add(time.Duration(e.x*-24) * time.Hour) for _, a := range artifacts { if time.Unix(a.PushedTime, 0).UTC().After(cutoff) { diff --git a/src/pkg/retention/policy/rule/lastx/evaluator_test.go b/src/pkg/retention/policy/rule/lastx/evaluator_test.go index becd79234..ee42425e8 100644 --- a/src/pkg/retention/policy/rule/lastx/evaluator_test.go +++ b/src/pkg/retention/policy/rule/lastx/evaluator_test.go @@ -5,8 +5,8 @@ import ( "testing" "time" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -38,7 +38,7 @@ func (e *EvaluatorTestSuite) TestNew() { func (e *EvaluatorTestSuite) TestProcess() { now := time.Now().UTC() - data := []*res.Candidate{ + data := []*art.Candidate{ {PushedTime: now.Add(time.Duration(1*-24) * time.Hour).Unix()}, {PushedTime: now.Add(time.Duration(2*-24) * time.Hour).Unix()}, {PushedTime: now.Add(time.Duration(3*-24) * time.Hour).Unix()}, diff --git a/src/pkg/retention/policy/rule/latestk/evaluator.go b/src/pkg/retention/policy/rule/latestk/evaluator.go index 9f9610f55..3405841cc 100644 --- a/src/pkg/retention/policy/rule/latestk/evaluator.go +++ b/src/pkg/retention/policy/rule/latestk/evaluator.go @@ -19,9 +19,9 @@ import ( "sort" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - 
"github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -40,7 +40,7 @@ type evaluator struct { } // Process the candidates based on the rule definition -func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { +func (e *evaluator) Process(artifacts []*art.Candidate) ([]*art.Candidate, error) { // Sort artifacts by their "active time" // // Active time is defined as the selection of c.PulledTime or c.PushedTime, @@ -81,7 +81,7 @@ func New(params rule.Parameters) rule.Evaluator { } } -func activeTime(c *res.Candidate) int64 { +func activeTime(c *art.Candidate) int64 { if c.PulledTime > c.PushedTime { return c.PulledTime } diff --git a/src/pkg/retention/policy/rule/latestk/evaluator_test.go b/src/pkg/retention/policy/rule/latestk/evaluator_test.go index 24b04fb9e..2fb09a5b9 100644 --- a/src/pkg/retention/policy/rule/latestk/evaluator_test.go +++ b/src/pkg/retention/policy/rule/latestk/evaluator_test.go @@ -22,18 +22,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/art" "github.com/stretchr/testify/suite" ) type EvaluatorTestSuite struct { suite.Suite - artifacts []*res.Candidate + artifacts []*art.Candidate } func (e *EvaluatorTestSuite) SetupSuite() { - e.artifacts = []*res.Candidate{ + e.artifacts = []*art.Candidate{ {PulledTime: 1, PushedTime: 2}, {PulledTime: 3, PushedTime: 4}, {PulledTime: 6, PushedTime: 5}, diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator.go b/src/pkg/retention/policy/rule/latestpl/evaluator.go index 21381759c..7f63a3896 100644 --- a/src/pkg/retention/policy/rule/latestpl/evaluator.go +++ b/src/pkg/retention/policy/rule/latestpl/evaluator.go @@ -21,9 +21,9 @@ import ( "sort" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" 
"github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -41,7 +41,7 @@ type evaluator struct { n int } -func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { +func (e *evaluator) Process(artifacts []*art.Candidate) ([]*art.Candidate, error) { sort.Slice(artifacts, func(i, j int) bool { return artifacts[i].PulledTime > artifacts[j].PulledTime }) diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator_test.go b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go index ebd1679ae..1c33b94ea 100644 --- a/src/pkg/retention/policy/rule/latestpl/evaluator_test.go +++ b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go @@ -20,8 +20,8 @@ import ( "math/rand" "testing" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -52,7 +52,7 @@ func (e *EvaluatorTestSuite) TestNew() { } func (e *EvaluatorTestSuite) TestProcess() { - data := []*res.Candidate{{PulledTime: 0}, {PulledTime: 1}, {PulledTime: 2}, {PulledTime: 3}, {PulledTime: 4}} + data := []*art.Candidate{{PulledTime: 0}, {PulledTime: 1}, {PulledTime: 2}, {PulledTime: 3}, {PulledTime: 4}} rand.Shuffle(len(data), func(i, j int) { data[i], data[j] = data[j], data[i] }) diff --git a/src/pkg/retention/policy/rule/latestps/evaluator.go b/src/pkg/retention/policy/rule/latestps/evaluator.go index f672aa1c6..96a6bf19a 100644 --- a/src/pkg/retention/policy/rule/latestps/evaluator.go +++ b/src/pkg/retention/policy/rule/latestps/evaluator.go @@ -21,9 +21,9 @@ import ( "sort" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) 
const ( @@ -42,7 +42,7 @@ type evaluator struct { } // Process the candidates based on the rule definition -func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { +func (e *evaluator) Process(artifacts []*art.Candidate) ([]*art.Candidate, error) { // The updated proposal does not guarantee the order artifacts are provided, so we have to sort them first sort.Slice(artifacts, func(i, j int) bool { return artifacts[i].PushedTime > artifacts[j].PushedTime diff --git a/src/pkg/retention/policy/rule/latestps/evaluator_test.go b/src/pkg/retention/policy/rule/latestps/evaluator_test.go index 38fa64570..a0e727c08 100644 --- a/src/pkg/retention/policy/rule/latestps/evaluator_test.go +++ b/src/pkg/retention/policy/rule/latestps/evaluator_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/suite" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/require" ) @@ -39,7 +39,7 @@ func (e *EvaluatorTestSuite) TestNew() { } func (e *EvaluatorTestSuite) TestProcess() { - data := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}, {PushedTime: 4}} + data := []*art.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}, {PushedTime: 4}} rand.Shuffle(len(data), func(i, j int) { data[i], data[j] = data[j], data[i] }) diff --git a/src/pkg/retention/policy/rule/nothing/evaluator.go b/src/pkg/retention/policy/rule/nothing/evaluator.go index 8bc4b9063..f926c20c7 100644 --- a/src/pkg/retention/policy/rule/nothing/evaluator.go +++ b/src/pkg/retention/policy/rule/nothing/evaluator.go @@ -15,9 +15,9 @@ package nothing import ( + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/action" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" ) const ( @@ -28,7 +28,7 @@ const ( 
type evaluator struct{} // Process for the "nothing" Evaluator simply returns the input with no error -func (e *evaluator) Process(artifacts []*res.Candidate) (processed []*res.Candidate, err error) { +func (e *evaluator) Process(artifacts []*art.Candidate) (processed []*art.Candidate, err error) { return processed, err } diff --git a/src/pkg/retention/policy/rule/nothing/evaluator_test.go b/src/pkg/retention/policy/rule/nothing/evaluator_test.go index 1432db651..db4cea68a 100644 --- a/src/pkg/retention/policy/rule/nothing/evaluator_test.go +++ b/src/pkg/retention/policy/rule/nothing/evaluator_test.go @@ -17,8 +17,8 @@ package nothing import ( "testing" + "github.com/goharbor/harbor/src/pkg/art" "github.com/goharbor/harbor/src/pkg/retention/policy/rule" - "github.com/goharbor/harbor/src/pkg/retention/res" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -36,7 +36,7 @@ func (e *EvaluatorTestSuite) TestNew() { func (e *EvaluatorTestSuite) TestProcess() { sut := New(rule.Parameters{}) - input := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}} + input := []*art.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}} result, err := sut.Process(input) diff --git a/src/pkg/robot/controller.go b/src/pkg/robot/controller.go new file mode 100644 index 000000000..5a6565711 --- /dev/null +++ b/src/pkg/robot/controller.go @@ -0,0 +1,117 @@ +package robot + +import ( + "fmt" + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/token" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/robot/model" + "github.com/pkg/errors" + "time" +) + +var ( + // RobotCtr is a global variable for the default robot account controller implementation + RobotCtr = NewController(NewDefaultRobotAccountManager()) +) + +// Controller to handle the requests 
related with robot account +type Controller interface { + // GetRobotAccount ... + GetRobotAccount(id int64) (*model.Robot, error) + + // CreateRobotAccount ... + CreateRobotAccount(robotReq *model.RobotCreate) (*model.Robot, error) + + // DeleteRobotAccount ... + DeleteRobotAccount(id int64) error + + // UpdateRobotAccount ... + UpdateRobotAccount(r *model.Robot) error + + // ListRobotAccount ... + ListRobotAccount(query *q.Query) ([]*model.Robot, error) +} + +// DefaultAPIController ... +type DefaultAPIController struct { + manager Manager +} + +// NewController ... +func NewController(robotMgr Manager) Controller { + return &DefaultAPIController{ + manager: robotMgr, + } +} + +// GetRobotAccount ... +func (d *DefaultAPIController) GetRobotAccount(id int64) (*model.Robot, error) { + return d.manager.GetRobotAccount(id) +} + +// CreateRobotAccount ... +func (d *DefaultAPIController) CreateRobotAccount(robotReq *model.RobotCreate) (*model.Robot, error) { + + var deferDel error + // Token duration in minutes + tokenDuration := time.Duration(config.RobotTokenDuration()) * time.Minute + expiresAt := time.Now().UTC().Add(tokenDuration).Unix() + createdName := common.RobotPrefix + robotReq.Name + + // first to add a robot account, and get its id. + robot := &model.Robot{ + Name: createdName, + Description: robotReq.Description, + ProjectID: robotReq.ProjectID, + ExpiresAt: expiresAt, + Visible: robotReq.Visible, + } + id, err := d.manager.CreateRobotAccount(robot) + if err != nil { + return nil, err + } + + // generate the token, and return it with response data. + // token is not stored in the database. 
+ jwtToken, err := token.New(id, robotReq.ProjectID, expiresAt, robotReq.Access) + if err != nil { + deferDel = err + return nil, fmt.Errorf("failed to valid parameters to generate token for robot account, %v", err) + } + + rawTk, err := jwtToken.Raw() + if err != nil { + deferDel = err + return nil, fmt.Errorf("failed to sign token for robot account, %v", err) + } + + defer func(deferDel error) { + if deferDel != nil { + if err := d.manager.DeleteRobotAccount(id); err != nil { + log.Error(errors.Wrap(err, fmt.Sprintf("failed to delete the robot account: %d", id))) + } + } + }(deferDel) + + robot.Token = rawTk + robot.ID = id + return robot, nil +} + +// DeleteRobotAccount ... +func (d *DefaultAPIController) DeleteRobotAccount(id int64) error { + return d.manager.DeleteRobotAccount(id) +} + +// UpdateRobotAccount ... +func (d *DefaultAPIController) UpdateRobotAccount(r *model.Robot) error { + return d.manager.UpdateRobotAccount(r) +} + +// ListRobotAccount ... +func (d *DefaultAPIController) ListRobotAccount(query *q.Query) ([]*model.Robot, error) { + return d.manager.ListRobotAccount(query) +} diff --git a/src/pkg/robot/controller_test.go b/src/pkg/robot/controller_test.go new file mode 100644 index 000000000..6b6fdd70a --- /dev/null +++ b/src/pkg/robot/controller_test.go @@ -0,0 +1,109 @@ +package robot + +import ( + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/common/utils/test" + core_cfg "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/robot/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" +) + +type ControllerTestSuite struct { + suite.Suite + ctr Controller + t *testing.T + assert *assert.Assertions + require *require.Assertions + + robotID int64 +} + +// SetupSuite ... 
+func (s *ControllerTestSuite) SetupSuite() { + test.InitDatabaseFromEnv() + conf := map[string]interface{}{ + common.RobotTokenDuration: "30", + } + core_cfg.InitWithSettings(conf) + s.t = s.T() + s.assert = assert.New(s.t) + s.require = require.New(s.t) + s.ctr = RobotCtr +} + +func (s *ControllerTestSuite) TestRobotAccount() { + + res := rbac.Resource("/project/1") + + rbacPolicy := &rbac.Policy{ + Resource: res.Subresource(rbac.ResourceRepository), + Action: "pull", + } + policies := []*rbac.Policy{} + policies = append(policies, rbacPolicy) + + robot1 := &model.RobotCreate{ + Name: "robot1", + Description: "TestCreateRobotAccount", + ProjectID: int64(1), + Access: policies, + } + + robot, err := s.ctr.CreateRobotAccount(robot1) + s.require.Nil(err) + s.require.Equal(robot.ProjectID, int64(1)) + s.require.Equal(robot.Description, "TestCreateRobotAccount") + s.require.NotEmpty(robot.Token) + s.require.Equal(robot.Name, common.RobotPrefix+"robot1") + + robotGet, err := s.ctr.GetRobotAccount(robot.ID) + s.require.Nil(err) + s.require.Equal(robotGet.ProjectID, int64(1)) + s.require.Equal(robotGet.Description, "TestCreateRobotAccount") + + robot.Disabled = true + err = s.ctr.UpdateRobotAccount(robot) + s.require.Nil(err) + s.require.Equal(robot.Disabled, true) + + robot2 := &model.RobotCreate{ + Name: "robot2", + Description: "TestCreateRobotAccount", + ProjectID: int64(1), + Access: policies, + } + r2, _ := s.ctr.CreateRobotAccount(robot2) + s.robotID = r2.ID + + keywords := make(map[string]interface{}) + keywords["ProjectID"] = int64(1) + query := &q.Query{ + Keywords: keywords, + } + robots, err := s.ctr.ListRobotAccount(query) + s.require.Nil(err) + s.require.Equal(len(robots), 2) + s.require.Equal(robots[1].Name, common.RobotPrefix+"robot2") + + err = s.ctr.DeleteRobotAccount(robot.ID) + s.require.Nil(err) + + robots, err = s.ctr.ListRobotAccount(query) + s.require.Equal(len(robots), 1) +} + +// TearDownSuite clears env for test suite +func (s 
*ControllerTestSuite) TearDownSuite() { + err := s.ctr.DeleteRobotAccount(s.robotID) + require.NoError(s.T(), err, "delete robot") +} + +// TestController ... +func TestController(t *testing.T) { + suite.Run(t, new(ControllerTestSuite)) +} diff --git a/src/pkg/robot/dao/robot.go b/src/pkg/robot/dao/robot.go new file mode 100644 index 000000000..eef25fdf5 --- /dev/null +++ b/src/pkg/robot/dao/robot.go @@ -0,0 +1,101 @@ +package dao + +import ( + "fmt" + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/robot/model" + "strings" + "time" +) + +// RobotAccountDao defines the interface to access the ImmutableRule data model +type RobotAccountDao interface { + // CreateRobotAccount ... + CreateRobotAccount(robot *model.Robot) (int64, error) + + // UpdateRobotAccount ... + UpdateRobotAccount(robot *model.Robot) error + + // GetRobotAccount ... + GetRobotAccount(id int64) (*model.Robot, error) + + // ListRobotAccounts ... + ListRobotAccounts(query *q.Query) ([]*model.Robot, error) + + // DeleteRobotAccount ... + DeleteRobotAccount(id int64) error +} + +// New creates a default implementation for RobotAccountDao +func New() RobotAccountDao { + return &robotAccountDao{} +} + +type robotAccountDao struct{} + +// CreateRobotAccount ... +func (r *robotAccountDao) CreateRobotAccount(robot *model.Robot) (int64, error) { + now := time.Now() + robot.CreationTime = now + robot.UpdateTime = now + id, err := dao.GetOrmer().Insert(robot) + if err != nil { + if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { + return 0, dao.ErrDupRows + } + return 0, err + } + return id, nil +} + +// GetRobotAccount ... 
+func (r *robotAccountDao) GetRobotAccount(id int64) (*model.Robot, error) { + robot := &model.Robot{ + ID: id, + } + if err := dao.GetOrmer().Read(robot); err != nil { + if err == orm.ErrNoRows { + return nil, nil + } + return nil, err + } + + return robot, nil +} + +// ListRobotAccounts ... +func (r *robotAccountDao) ListRobotAccounts(query *q.Query) ([]*model.Robot, error) { + o := dao.GetOrmer() + qt := o.QueryTable(new(model.Robot)) + + if query != nil { + if len(query.Keywords) > 0 { + for k, v := range query.Keywords { + qt = qt.Filter(fmt.Sprintf("%s__icontains", k), v) + } + } + + if query.PageNumber > 0 && query.PageSize > 0 { + qt = qt.Limit(query.PageSize, (query.PageNumber-1)*query.PageSize) + } + } + + robots := make([]*model.Robot, 0) + _, err := qt.All(&robots) + return robots, err +} + +// UpdateRobotAccount ... +func (r *robotAccountDao) UpdateRobotAccount(robot *model.Robot) error { + robot.UpdateTime = time.Now() + _, err := dao.GetOrmer().Update(robot) + return err +} + +// DeleteRobotAccount ... 
+func (r *robotAccountDao) DeleteRobotAccount(id int64) error { + _, err := dao.GetOrmer().QueryTable(&model.Robot{}).Filter("ID", id).Delete() + return err +} diff --git a/src/pkg/robot/dao/robot_test.go b/src/pkg/robot/dao/robot_test.go new file mode 100644 index 000000000..b55d722d4 --- /dev/null +++ b/src/pkg/robot/dao/robot_test.go @@ -0,0 +1,140 @@ +package dao + +import ( + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/robot/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" +) + +type robotAccountDaoTestSuite struct { + suite.Suite + require *require.Assertions + assert *assert.Assertions + dao RobotAccountDao + id1 int64 + id2 int64 + id3 int64 + id4 int64 +} + +func (t *robotAccountDaoTestSuite) SetupSuite() { + t.require = require.New(t.T()) + t.assert = assert.New(t.T()) + dao.PrepareTestForPostgresSQL() + t.dao = New() +} + +func (t *robotAccountDaoTestSuite) TestCreateRobotAccount() { + robotName := "test1" + robot := &model.Robot{ + Name: robotName, + Description: "test1 description", + ProjectID: 1, + } + id, err := t.dao.CreateRobotAccount(robot) + t.require.Nil(err) + t.id1 = id + t.require.Nil(err) + t.require.NotNil(id) +} + +func (t *robotAccountDaoTestSuite) TestGetRobotAccount() { + robotName := "test2" + robot := &model.Robot{ + Name: robotName, + Description: "test2 description", + ProjectID: 1, + } + + // add + id, err := t.dao.CreateRobotAccount(robot) + t.require.Nil(err) + t.id2 = id + + robot, err = t.dao.GetRobotAccount(id) + t.require.Nil(err) + t.require.Equal(robotName, robot.Name) +} + +func (t *robotAccountDaoTestSuite) TestListRobotAccounts() { + robotName := "test3" + robot := &model.Robot{ + Name: robotName, + Description: "test3 description", + ProjectID: 1, + } + + id, err := t.dao.CreateRobotAccount(robot) + t.require.Nil(err) + t.id3 = id + + keywords := 
make(map[string]interface{}) + keywords["ProjectID"] = 1 + robots, err := t.dao.ListRobotAccounts(&q.Query{ + Keywords: keywords, + }) + t.require.Nil(err) + t.require.Equal(3, len(robots)) +} + +func (t *robotAccountDaoTestSuite) TestUpdateRobotAccount() { + robotName := "test4" + robot := &model.Robot{ + Name: robotName, + Description: "test4 description", + ProjectID: 1, + } + // add + id, err := t.dao.CreateRobotAccount(robot) + t.require.Nil(err) + t.id4 = id + // Disable + robot.Disabled = true + err = t.dao.UpdateRobotAccount(robot) + t.require.Nil(err) + // Get + robot, err = t.dao.GetRobotAccount(id) + t.require.Nil(err) + t.require.Equal(true, robot.Disabled) +} + +func (t *robotAccountDaoTestSuite) TestDeleteRobotAccount() { + robotName := "test5" + robot := &model.Robot{ + Name: robotName, + Description: "test5 description", + ProjectID: 1, + } + // add + id, err := t.dao.CreateRobotAccount(robot) + t.require.Nil(err) + // Disable + err = t.dao.DeleteRobotAccount(id) + t.require.Nil(err) + // Get + robot, err = t.dao.GetRobotAccount(id) + t.require.Nil(err) +} + +// TearDownSuite clears env for test suite +func (t *robotAccountDaoTestSuite) TearDownSuite() { + err := t.dao.DeleteRobotAccount(t.id1) + require.NoError(t.T(), err, "delete robot 1") + + err = t.dao.DeleteRobotAccount(t.id2) + require.NoError(t.T(), err, "delete robot 2") + + err = t.dao.DeleteRobotAccount(t.id3) + require.NoError(t.T(), err, "delete robot 3") + + err = t.dao.DeleteRobotAccount(t.id4) + require.NoError(t.T(), err, "delete robot 4") +} + +func TestRobotAccountDaoTestSuite(t *testing.T) { + suite.Run(t, &robotAccountDaoTestSuite{}) +} diff --git a/src/pkg/robot/manager.go b/src/pkg/robot/manager.go new file mode 100644 index 000000000..9a435e1f3 --- /dev/null +++ b/src/pkg/robot/manager.go @@ -0,0 +1,66 @@ +package robot + +import ( + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/robot/dao" + "github.com/goharbor/harbor/src/pkg/robot/model" +) + 
+var ( + // Mgr is a global variable for the default robot account manager implementation + Mgr = NewDefaultRobotAccountManager() +) + +// Manager ... +type Manager interface { + // GetRobotAccount ... + GetRobotAccount(id int64) (*model.Robot, error) + + // CreateRobotAccount ... + CreateRobotAccount(m *model.Robot) (int64, error) + + // DeleteRobotAccount ... + DeleteRobotAccount(id int64) error + + // UpdateRobotAccount ... + UpdateRobotAccount(m *model.Robot) error + + // ListRobotAccount ... + ListRobotAccount(query *q.Query) ([]*model.Robot, error) +} + +type defaultRobotManager struct { + dao dao.RobotAccountDao +} + +// NewDefaultRobotAccountManager return a new instance of defaultRobotManager +func NewDefaultRobotAccountManager() Manager { + return &defaultRobotManager{ + dao: dao.New(), + } +} + +// GetRobotAccount ... +func (drm *defaultRobotManager) GetRobotAccount(id int64) (*model.Robot, error) { + return drm.dao.GetRobotAccount(id) +} + +// CreateRobotAccount ... +func (drm *defaultRobotManager) CreateRobotAccount(r *model.Robot) (int64, error) { + return drm.dao.CreateRobotAccount(r) +} + +// DeleteRobotAccount ... +func (drm *defaultRobotManager) DeleteRobotAccount(id int64) error { + return drm.dao.DeleteRobotAccount(id) +} + +// UpdateRobotAccount ... +func (drm *defaultRobotManager) UpdateRobotAccount(r *model.Robot) error { + return drm.dao.UpdateRobotAccount(r) +} + +// ListRobotAccount ... 
+func (drm *defaultRobotManager) ListRobotAccount(query *q.Query) ([]*model.Robot, error) { + return drm.dao.ListRobotAccounts(query) +} diff --git a/src/pkg/robot/manager_test.go b/src/pkg/robot/manager_test.go new file mode 100644 index 000000000..e1bd1fd06 --- /dev/null +++ b/src/pkg/robot/manager_test.go @@ -0,0 +1,147 @@ +package robot + +import ( + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/robot/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "os" + "testing" +) + +type mockRobotDao struct { + mock.Mock +} + +func (m *mockRobotDao) CreateRobotAccount(r *model.Robot) (int64, error) { + args := m.Called(r) + return int64(args.Int(0)), args.Error(1) +} + +func (m *mockRobotDao) UpdateRobotAccount(r *model.Robot) error { + args := m.Called(r) + return args.Error(1) +} + +func (m *mockRobotDao) DeleteRobotAccount(id int64) error { + args := m.Called(id) + return args.Error(1) +} + +func (m *mockRobotDao) GetRobotAccount(id int64) (*model.Robot, error) { + args := m.Called(id) + var r *model.Robot + if args.Get(0) != nil { + r = args.Get(0).(*model.Robot) + } + return r, args.Error(1) +} + +func (m *mockRobotDao) ListRobotAccounts(query *q.Query) ([]*model.Robot, error) { + args := m.Called() + var rs []*model.Robot + if args.Get(0) != nil { + rs = args.Get(0).([]*model.Robot) + } + return rs, args.Error(1) +} + +type managerTestingSuite struct { + suite.Suite + t *testing.T + assert *assert.Assertions + require *require.Assertions + mockRobotDao *mockRobotDao +} + +func (m *managerTestingSuite) SetupSuite() { + m.t = m.T() + m.assert = assert.New(m.t) + m.require = require.New(m.t) + + err := os.Setenv("RUN_MODE", "TEST") + m.require.Nil(err) +} + +func (m *managerTestingSuite) TearDownSuite() { + err := os.Unsetenv("RUN_MODE") + m.require.Nil(err) +} + +func (m *managerTestingSuite) SetupTest() { + m.mockRobotDao = 
&mockRobotDao{}
+	Mgr = &defaultRobotManager{
+		dao: m.mockRobotDao,
+	}
+}
+
+func TestManagerTestingSuite(t *testing.T) {
+	suite.Run(t, &managerTestingSuite{})
+}
+
+func (m *managerTestingSuite) TestCreateRobotAccount() {
+	m.mockRobotDao.On("CreateRobotAccount", mock.Anything).Return(1, nil)
+	id, err := Mgr.CreateRobotAccount(&model.Robot{})
+	m.mockRobotDao.AssertCalled(m.t, "CreateRobotAccount", mock.Anything)
+	m.require.Nil(err)
+	m.assert.Equal(int64(1), id)
+}
+
+func (m *managerTestingSuite) TestUpdateRobotAccount() {
+	m.mockRobotDao.On("UpdateRobotAccount", mock.Anything).Return(1, nil)
+	err := Mgr.UpdateRobotAccount(&model.Robot{})
+	m.mockRobotDao.AssertCalled(m.t, "UpdateRobotAccount", mock.Anything)
+	m.require.Nil(err)
+}
+
+func (m *managerTestingSuite) TestDeleteRobotAccount() {
+	m.mockRobotDao.On("DeleteRobotAccount", mock.Anything).Return(1, nil)
+	err := Mgr.DeleteRobotAccount(int64(1))
+	m.mockRobotDao.AssertCalled(m.t, "DeleteRobotAccount", mock.Anything)
+	m.require.Nil(err)
+}
+
+func (m *managerTestingSuite) TestGetRobotAccount() {
+	m.mockRobotDao.On("GetRobotAccount", mock.Anything).Return(&model.Robot{
+		ID:        1,
+		ProjectID: 1,
+		Disabled:  true,
+		ExpiresAt: 150000,
+	}, nil)
+	ir, err := Mgr.GetRobotAccount(1)
+	m.mockRobotDao.AssertCalled(m.t, "GetRobotAccount", mock.Anything)
+	m.require.Nil(err)
+	m.require.NotNil(ir)
+	m.assert.Equal(int64(1), ir.ID)
+}
+
+func (m *managerTestingSuite) TestListRobotAccount() {
+	m.mockRobotDao.On("ListRobotAccounts").Return([]*model.Robot{
+		{
+			ID:        1,
+			ProjectID: 1,
+			Disabled:  false,
+			ExpiresAt: 12345,
+		},
+		{
+			ID:        2,
+			ProjectID: 1,
+			Disabled:  false,
+			ExpiresAt: 54321,
+		}}, nil)
+
+	keywords := make(map[string]interface{})
+	keywords["ProjectID"] = int64(1)
+	query := &q.Query{
+		Keywords: keywords,
+	}
+	rs, err := Mgr.ListRobotAccount(query)
+	m.mockRobotDao.AssertCalled(m.t, "ListRobotAccounts")
+	m.require.Nil(err)
+	m.assert.Equal(len(rs), 2)
+	m.assert.Equal(rs[0].Disabled, false)
+	m.assert.Equal(rs[1].ExpiresAt, int64(54321))
+
+}
diff --git a/src/common/models/robot.go b/src/pkg/robot/model/robot.go
similarity index 70%
rename from src/common/models/robot.go
rename to src/pkg/robot/model/robot.go
index 2e64ca8d2..db911f11c 100644
--- a/src/common/models/robot.go
+++ b/src/pkg/robot/model/robot.go
@@ -1,20 +1,7 @@
-// Copyright Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package models
+package model
 
 import (
+	"github.com/astaxie/beego/orm"
 	"github.com/astaxie/beego/validation"
 	"github.com/goharbor/harbor/src/common/rbac"
 	"github.com/goharbor/harbor/src/common/utils"
@@ -24,18 +11,29 @@ import (
 // RobotTable is the name of table in DB that holds the robot object
 const RobotTable = "robot"
 
+func init() {
+	orm.RegisterModel(&Robot{})
+}
+
 // Robot holds the details of a robot.
type Robot struct { ID int64 `orm:"pk;auto;column(id)" json:"id"` Name string `orm:"column(name)" json:"name"` + Token string `orm:"-" json:"token"` Description string `orm:"column(description)" json:"description"` ProjectID int64 `orm:"column(project_id)" json:"project_id"` ExpiresAt int64 `orm:"column(expiresat)" json:"expires_at"` Disabled bool `orm:"column(disabled)" json:"disabled"` + Visible bool `orm:"column(visible)" json:"-"` CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` } +// TableName ... +func (r *Robot) TableName() string { + return RobotTable +} + // RobotQuery ... type RobotQuery struct { Name string @@ -45,16 +43,24 @@ type RobotQuery struct { Pagination } -// RobotReq ... -type RobotReq struct { +// RobotCreate ... +type RobotCreate struct { Name string `json:"name"` + ProjectID int64 `json:"pid"` Description string `json:"description"` Disabled bool `json:"disabled"` + Visible bool `json:"-"` Access []*rbac.Policy `json:"access"` } +// Pagination ... +type Pagination struct { + Page int64 + Size int64 +} + // Valid ... -func (rq *RobotReq) Valid(v *validation.Validation) { +func (rq *RobotCreate) Valid(v *validation.Validation) { if utils.IsIllegalLength(rq.Name, 1, 255) { v.SetError("name", "robot name with illegal length") } @@ -68,8 +74,3 @@ type RobotRep struct { Name string `json:"name"` Token string `json:"token"` } - -// TableName ... -func (r *Robot) TableName() string { - return RobotTable -} diff --git a/src/pkg/scan/api/scan/base_controller.go b/src/pkg/scan/api/scan/base_controller.go new file mode 100644 index 000000000..514eb9ba5 --- /dev/null +++ b/src/pkg/scan/api/scan/base_controller.go @@ -0,0 +1,411 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scan + +import ( + "fmt" + "time" + + "github.com/goharbor/harbor/src/common" + cj "github.com/goharbor/harbor/src/common/job" + jm "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/robot" + "github.com/goharbor/harbor/src/pkg/robot/model" + sca "github.com/goharbor/harbor/src/pkg/scan" + sc "github.com/goharbor/harbor/src/pkg/scan/api/scanner" + "github.com/goharbor/harbor/src/pkg/scan/dao/scan" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/goharbor/harbor/src/pkg/scan/report" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/google/uuid" + "github.com/pkg/errors" +) + +// DefaultController is a default singleton scan API controller. +var DefaultController = NewController() + +const ( + configRegistryEndpoint = "registryEndpoint" + configCoreInternalAddr = "coreInternalAddr" +) + +// uuidGenerator is a func template which is for generating UUID. +type uuidGenerator func() (string, error) + +// configGetter is a func template which is used to wrap the config management +// utility methods. 
+type configGetter func(cfg string) (string, error) + +// basicController is default implementation of api.Controller interface +type basicController struct { + // Manage the scan report records + manager report.Manager + // Scanner controller + sc sc.Controller + // Robot account controller + rc robot.Controller + // Job service client + jc cj.Client + // UUID generator + uuid uuidGenerator + // Configuration getter func + config configGetter +} + +// NewController news a scan API controller +func NewController() Controller { + return &basicController{ + // New report manager + manager: report.NewManager(), + // Refer to the default scanner controller + sc: sc.DefaultController, + // Refer to the default robot account controller + rc: robot.RobotCtr, + // Refer to the default job service client + jc: cj.GlobalClient, + // Generate UUID with uuid lib + uuid: func() (string, error) { + aUUID, err := uuid.NewUUID() + if err != nil { + return "", err + } + + return aUUID.String(), nil + }, + // Get the required configuration options + config: func(cfg string) (string, error) { + switch cfg { + case configRegistryEndpoint: + return config.ExtEndpoint() + case configCoreInternalAddr: + return config.InternalCoreURL(), nil + default: + return "", errors.Errorf("configuration option %s not defined", cfg) + } + }, + } +} + +// Scan ... +func (bc *basicController) Scan(artifact *v1.Artifact) error { + if artifact == nil { + return errors.New("nil artifact to scan") + } + + r, err := bc.sc.GetRegistrationByProject(artifact.NamespaceID) + if err != nil { + return errors.Wrap(err, "scan controller: scan") + } + + // Check the health of the registration by ping. + // The metadata of the scanner adapter is also returned. + meta, err := bc.sc.Ping(r) + if err != nil { + return errors.Wrap(err, "scan controller: scan") + } + + // Generate a UUID as track ID which groups the report records generated + // by the specified registration for the digest with given mime type. 
+ trackID, err := bc.uuid() + if err != nil { + return errors.Wrap(err, "scan controller: scan") + } + + producesMimes := make([]string, 0) + matched := false + for _, ca := range meta.Capabilities { + for _, cm := range ca.ConsumesMimeTypes { + if cm == artifact.MimeType { + matched = true + break + } + } + + if matched { + for _, pm := range ca.ProducesMimeTypes { + // Create report placeholder first + reportPlaceholder := &scan.Report{ + Digest: artifact.Digest, + RegistrationUUID: r.UUID, + Status: job.PendingStatus.String(), + StatusCode: job.PendingStatus.Code(), + TrackID: trackID, + MimeType: pm, + } + _, e := bc.manager.Create(reportPlaceholder) + if e != nil { + // Recorded by error wrap and logged at the same time. + if err == nil { + err = e + } else { + err = errors.Wrap(e, err.Error()) + } + + logger.Error(errors.Wrap(e, "scan controller: scan")) + continue + } + + producesMimes = append(producesMimes, pm) + } + + break + } + } + + // Scanner does not support scanning the given artifact. + if !matched { + return errors.Errorf("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.MimeType) + } + + // If all the record are created failed. + if len(producesMimes) == 0 { + // Return the last error + return errors.Wrap(err, "scan controller: scan") + } + + jobID, err := bc.launchScanJob(trackID, artifact, r, producesMimes) + if err != nil { + // Update the status to the concrete error + // Change status code to normal error code + if e := bc.manager.UpdateStatus(trackID, err.Error(), 0); e != nil { + err = errors.Wrap(e, err.Error()) + } + + return errors.Wrap(err, "scan controller: scan") + } + + // Insert the generated job ID now + // It will not block the whole process. If any errors happened, just logged. + if err := bc.manager.UpdateScanJobID(trackID, jobID); err != nil { + logger.Error(errors.Wrap(err, "scan controller: scan")) + } + + return nil +} + +// GetReport ... 
+func (bc *basicController) GetReport(artifact *v1.Artifact, mimeTypes []string) ([]*scan.Report, error) { + if artifact == nil { + return nil, errors.New("no way to get report for nil artifact") + } + + mimes := make([]string, 0) + mimes = append(mimes, mimeTypes...) + if len(mimes) == 0 { + // Retrieve native as default + mimes = append(mimes, v1.MimeTypeNativeReport) + } + + // Get current scanner settings + r, err := bc.sc.GetRegistrationByProject(artifact.NamespaceID) + if err != nil { + return nil, errors.Wrap(err, "scan controller: get report") + } + + if r == nil { + return nil, errors.New("no scanner registration configured") + } + + return bc.manager.GetBy(artifact.Digest, r.UUID, mimes) +} + +// GetSummary ... +func (bc *basicController) GetSummary(artifact *v1.Artifact, mimeTypes []string) (map[string]interface{}, error) { + if artifact == nil { + return nil, errors.New("no way to get report summaries for nil artifact") + } + + // Get reports first + rps, err := bc.GetReport(artifact, mimeTypes) + if err != nil { + return nil, err + } + + summaries := make(map[string]interface{}, len(rps)) + for _, rp := range rps { + sum, err := report.GenerateSummary(rp) + if err != nil { + return nil, err + } + + summaries[rp.MimeType] = sum + } + + return summaries, nil +} + +// GetScanLog ... +func (bc *basicController) GetScanLog(uuid string) ([]byte, error) { + if len(uuid) == 0 { + return nil, errors.New("empty uuid to get scan log") + } + + // Get by uuid + sr, err := bc.manager.Get(uuid) + if err != nil { + return nil, errors.Wrap(err, "scan controller: get scan log") + } + + if sr == nil { + // Not found + return nil, nil + } + + // Not job error + if sr.StatusCode == job.ErrorStatus.Code() { + jst := job.Status(sr.Status) + if jst.Code() == -1 { + return []byte(sr.Status), nil + } + } + + // Job log + return bc.jc.GetJobLog(sr.JobID) +} + +// HandleJobHooks ... 
+func (bc *basicController) HandleJobHooks(trackID string, change *job.StatusChange) error { + if len(trackID) == 0 { + return errors.New("empty track ID") + } + + if change == nil { + return errors.New("nil change object") + } + + // Check in data + if len(change.CheckIn) > 0 { + checkInReport := &sca.CheckInReport{} + if err := checkInReport.FromJSON(change.CheckIn); err != nil { + return errors.Wrap(err, "scan controller: handle job hook") + } + + rpl, err := bc.manager.GetBy( + checkInReport.Digest, + checkInReport.RegistrationUUID, + []string{checkInReport.MimeType}) + if err != nil { + return errors.Wrap(err, "scan controller: handle job hook") + } + + if len(rpl) == 0 { + return errors.New("no report found to update data") + } + + if err := bc.manager.UpdateReportData( + rpl[0].UUID, + checkInReport.RawReport, + change.Metadata.Revision); err != nil { + return errors.Wrap(err, "scan controller: handle job hook") + } + + return nil + } + + return bc.manager.UpdateStatus(trackID, change.Status, change.Metadata.Revision) +} + +// makeRobotAccount creates a robot account based on the arguments for scanning. +func (bc *basicController) makeRobotAccount(pid int64, repository string, ttl int64) (string, error) { + // Use uuid as name to avoid duplicated entries. 
+ UUID, err := bc.uuid() + if err != nil { + return "", errors.Wrap(err, "scan controller: make robot account") + } + + expireAt := time.Now().UTC().Add(time.Duration(ttl) * time.Second).Unix() + + logger.Warningf("repository %s and expire time %d are not supported by robot controller", repository, expireAt) + + resource := fmt.Sprintf("/project/%d/repository", pid) + access := []*rbac.Policy{{ + Resource: rbac.Resource(resource), + Action: "pull", + }} + + account := &model.RobotCreate{ + Name: fmt.Sprintf("%s%s", common.RobotPrefix, UUID), + Description: "for scan", + ProjectID: pid, + Access: access, + } + + rb, err := bc.rc.CreateRobotAccount(account) + if err != nil { + return "", errors.Wrap(err, "scan controller: make robot account") + } + + return rb.Token, nil +} + +// launchScanJob launches a job to run scan +func (bc *basicController) launchScanJob(trackID string, artifact *v1.Artifact, registration *scanner.Registration, mimes []string) (jobID string, err error) { + externalURL, err := bc.config(configRegistryEndpoint) + if err != nil { + return "", errors.Wrap(err, "scan controller: launch scan job") + } + + // Make a robot account with 30 minutes + robotAccount, err := bc.makeRobotAccount(artifact.NamespaceID, artifact.Repository, 1800) + if err != nil { + return "", errors.Wrap(err, "scan controller: launch scan job") + } + + // Set job parameters + scanReq := &v1.ScanRequest{ + Registry: &v1.Registry{ + URL: externalURL, + Authorization: robotAccount, + }, + Artifact: artifact, + } + + rJSON, err := registration.ToJSON() + if err != nil { + return "", errors.Wrap(err, "scan controller: launch scan job") + } + + sJSON, err := scanReq.ToJSON() + if err != nil { + return "", errors.Wrap(err, "launch scan job") + } + + params := make(map[string]interface{}) + params[sca.JobParamRegistration] = rJSON + params[sca.JobParameterRequest] = sJSON + params[sca.JobParameterMimes] = mimes + + // Launch job + callbackURL, err := bc.config(configCoreInternalAddr) 
+ if err != nil { + return "", errors.Wrap(err, "launch scan job") + } + hookURL := fmt.Sprintf("%s/service/notifications/jobs/scan/%s", callbackURL, trackID) + + j := &jm.JobData{ + Name: job.ImageScanJob, + Metadata: &jm.JobMetadata{ + JobKind: job.KindGeneric, + }, + Parameters: params, + StatusHook: hookURL, + } + + return bc.jc.SubmitJob(j) +} diff --git a/src/pkg/scan/api/scan/base_controller_test.go b/src/pkg/scan/api/scan/base_controller_test.go new file mode 100644 index 000000000..5d3c87d35 --- /dev/null +++ b/src/pkg/scan/api/scan/base_controller_test.go @@ -0,0 +1,537 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scan + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/rbac" + + "github.com/goharbor/harbor/src/pkg/robot/model" + + cjm "github.com/goharbor/harbor/src/common/job/models" + jm "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/q" + sca "github.com/goharbor/harbor/src/pkg/scan" + "github.com/goharbor/harbor/src/pkg/scan/dao/scan" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/goharbor/harbor/src/pkg/scan/vuln" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// ControllerTestSuite is the test suite for scan controller. +type ControllerTestSuite struct { + suite.Suite + + registration *scanner.Registration + artifact *v1.Artifact + rawReport string + c Controller +} + +// TestController is the entry point of ControllerTestSuite. +func TestController(t *testing.T) { + suite.Run(t, new(ControllerTestSuite)) +} + +// SetupSuite ... 
+func (suite *ControllerTestSuite) SetupSuite() { + suite.registration = &scanner.Registration{ + ID: 1, + UUID: "uuid001", + Name: "Test-scan-controller", + URL: "http://testing.com:3128", + IsDefault: true, + } + + suite.artifact = &v1.Artifact{ + NamespaceID: 1, + Repository: "scan", + Tag: "golang", + Digest: "digest-code", + MimeType: v1.MimeTypeDockerArtifact, + } + + m := &v1.ScannerAdapterMetadata{ + Scanner: &v1.Scanner{ + Name: "Clair", + Vendor: "Harbor", + Version: "0.1.0", + }, + Capabilities: []*v1.ScannerCapability{{ + ConsumesMimeTypes: []string{ + v1.MimeTypeOCIArtifact, + v1.MimeTypeDockerArtifact, + }, + ProducesMimeTypes: []string{ + v1.MimeTypeNativeReport, + }, + }}, + Properties: v1.ScannerProperties{ + "extra": "testing", + }, + } + + sc := &MockScannerController{} + sc.On("GetRegistrationByProject", suite.artifact.NamespaceID).Return(suite.registration, nil) + sc.On("Ping", suite.registration).Return(m, nil) + + mgr := &MockReportManager{} + mgr.On("Create", &scan.Report{ + Digest: "digest-code", + RegistrationUUID: "uuid001", + MimeType: "application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0", + Status: "Pending", + StatusCode: 0, + TrackID: "the-uuid-123", + }).Return("r-uuid", nil) + mgr.On("UpdateScanJobID", "the-uuid-123", "the-job-id").Return(nil) + + rp := vuln.Report{ + GeneratedAt: time.Now().UTC().String(), + Scanner: &v1.Scanner{ + Name: "Clair", + Vendor: "Harbor", + Version: "0.1.0", + }, + Severity: vuln.High, + Vulnerabilities: []*vuln.VulnerabilityItem{ + { + ID: "2019-0980-0909", + Package: "dpkg", + Version: "0.9.1", + FixVersion: "0.9.2", + Severity: vuln.High, + Description: "mock one", + Links: []string{"https://vuln.com"}, + }, + }, + } + + jsonData, err := json.Marshal(rp) + require.NoError(suite.T(), err) + suite.rawReport = string(jsonData) + + reports := []*scan.Report{ + { + ID: 11, + UUID: "rp-uuid-001", + Digest: "digest-code", + RegistrationUUID: "uuid001", + MimeType: 
"application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0", + Status: "Success", + StatusCode: 3, + TrackID: "the-uuid-123", + JobID: "the-job-id", + StatusRevision: time.Now().Unix(), + Report: suite.rawReport, + StartTime: time.Now(), + EndTime: time.Now().Add(2 * time.Second), + }, + } + + mgr.On("GetBy", suite.artifact.Digest, suite.registration.UUID, []string{v1.MimeTypeNativeReport}).Return(reports, nil) + mgr.On("Get", "rp-uuid-001").Return(reports[0], nil) + mgr.On("UpdateReportData", "rp-uuid-001", suite.rawReport, (int64)(10000)).Return(nil) + mgr.On("UpdateStatus", "the-uuid-123", "Success", (int64)(10000)).Return(nil) + + rc := &MockRobotController{} + + resource := fmt.Sprintf("/project/%d/repository", suite.artifact.NamespaceID) + access := []*rbac.Policy{{ + Resource: rbac.Resource(resource), + Action: "pull", + }} + + rname := fmt.Sprintf("%s%s", common.RobotPrefix, "the-uuid-123") + account := &model.RobotCreate{ + Name: rname, + Description: "for scan", + ProjectID: suite.artifact.NamespaceID, + Access: access, + } + rc.On("CreateRobotAccount", account).Return(&model.Robot{ + ID: 1, + Name: rname, + Token: "robot-account", + Description: "for scan", + ProjectID: suite.artifact.NamespaceID, + }, nil) + + // Set job parameters + req := &v1.ScanRequest{ + Registry: &v1.Registry{ + URL: "https://core.com", + Authorization: "robot-account", + }, + Artifact: suite.artifact, + } + + rJSON, err := req.ToJSON() + require.NoError(suite.T(), err) + + regJSON, err := suite.registration.ToJSON() + require.NoError(suite.T(), err) + + jc := &MockJobServiceClient{} + params := make(map[string]interface{}) + params[sca.JobParamRegistration] = regJSON + params[sca.JobParameterRequest] = rJSON + params[sca.JobParameterMimes] = []string{v1.MimeTypeNativeReport} + + j := &jm.JobData{ + Name: job.ImageScanJob, + Metadata: &jm.JobMetadata{ + JobKind: job.KindGeneric, + }, + Parameters: params, + StatusHook: 
fmt.Sprintf("%s/service/notifications/jobs/scan/%s", "http://core:8080", "the-uuid-123"), + } + jc.On("SubmitJob", j).Return("the-job-id", nil) + jc.On("GetJobLog", "the-job-id").Return([]byte("job log"), nil) + + suite.c = &basicController{ + manager: mgr, + sc: sc, + jc: jc, + rc: rc, + uuid: func() (string, error) { + return "the-uuid-123", nil + }, + config: func(cfg string) (string, error) { + switch cfg { + case configRegistryEndpoint: + return "https://core.com", nil + case configCoreInternalAddr: + return "http://core:8080", nil + } + + return "", nil + }, + } +} + +// TearDownSuite ... +func (suite *ControllerTestSuite) TearDownSuite() {} + +// TestScanControllerScan ... +func (suite *ControllerTestSuite) TestScanControllerScan() { + err := suite.c.Scan(suite.artifact) + require.NoError(suite.T(), err) +} + +// TestScanControllerGetReport ... +func (suite *ControllerTestSuite) TestScanControllerGetReport() { + rep, err := suite.c.GetReport(suite.artifact, []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(rep)) +} + +// TestScanControllerGetSummary ... +func (suite *ControllerTestSuite) TestScanControllerGetSummary() { + sum, err := suite.c.GetSummary(suite.artifact, []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(sum)) +} + +// TestScanControllerGetScanLog ... +func (suite *ControllerTestSuite) TestScanControllerGetScanLog() { + bytes, err := suite.c.GetScanLog("rp-uuid-001") + require.NoError(suite.T(), err) + assert.Condition(suite.T(), func() (success bool) { + success = len(bytes) > 0 + return + }) +} + +// TestScanControllerHandleJobHooks ... 
+func (suite *ControllerTestSuite) TestScanControllerHandleJobHooks() { + cReport := &sca.CheckInReport{ + Digest: "digest-code", + RegistrationUUID: suite.registration.UUID, + MimeType: v1.MimeTypeNativeReport, + RawReport: suite.rawReport, + } + + cRpJSON, err := cReport.ToJSON() + require.NoError(suite.T(), err) + + statusChange := &job.StatusChange{ + JobID: "the-job-id", + Status: "Success", + CheckIn: string(cRpJSON), + Metadata: &job.StatsInfo{ + Revision: (int64)(10000), + }, + } + + err = suite.c.HandleJobHooks("the-uuid-123", statusChange) + require.NoError(suite.T(), err) +} + +// Mock things + +// MockReportManager ... +type MockReportManager struct { + mock.Mock +} + +// Create ... +func (mrm *MockReportManager) Create(r *scan.Report) (string, error) { + args := mrm.Called(r) + + return args.String(0), args.Error(1) +} + +// UpdateScanJobID ... +func (mrm *MockReportManager) UpdateScanJobID(trackID string, jobID string) error { + args := mrm.Called(trackID, jobID) + + return args.Error(0) +} + +func (mrm *MockReportManager) UpdateStatus(trackID string, status string, rev int64) error { + args := mrm.Called(trackID, status, rev) + + return args.Error(0) +} + +func (mrm *MockReportManager) UpdateReportData(uuid string, report string, rev int64) error { + args := mrm.Called(uuid, report, rev) + + return args.Error(0) +} + +func (mrm *MockReportManager) GetBy(digest string, registrationUUID string, mimeTypes []string) ([]*scan.Report, error) { + args := mrm.Called(digest, registrationUUID, mimeTypes) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]*scan.Report), args.Error(1) +} + +func (mrm *MockReportManager) Get(uuid string) (*scan.Report, error) { + args := mrm.Called(uuid) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*scan.Report), args.Error(1) +} + +// MockScannerController ... +type MockScannerController struct { + mock.Mock +} + +// ListRegistrations ... 
+func (msc *MockScannerController) ListRegistrations(query *q.Query) ([]*scanner.Registration, error) { + args := msc.Called(query) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]*scanner.Registration), args.Error(1) +} + +// CreateRegistration ... +func (msc *MockScannerController) CreateRegistration(registration *scanner.Registration) (string, error) { + args := msc.Called(registration) + + return args.String(0), args.Error(1) +} + +// GetRegistration ... +func (msc *MockScannerController) GetRegistration(registrationUUID string) (*scanner.Registration, error) { + args := msc.Called(registrationUUID) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*scanner.Registration), args.Error(1) +} + +// RegistrationExists ... +func (msc *MockScannerController) RegistrationExists(registrationUUID string) bool { + args := msc.Called(registrationUUID) + + return args.Bool(0) +} + +// UpdateRegistration ... +func (msc *MockScannerController) UpdateRegistration(registration *scanner.Registration) error { + args := msc.Called(registration) + + return args.Error(0) +} + +// DeleteRegistration ... +func (msc *MockScannerController) DeleteRegistration(registrationUUID string) (*scanner.Registration, error) { + args := msc.Called(registrationUUID) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*scanner.Registration), args.Error(1) +} + +// SetDefaultRegistration ... +func (msc *MockScannerController) SetDefaultRegistration(registrationUUID string) error { + args := msc.Called(registrationUUID) + + return args.Error(0) +} + +// SetRegistrationByProject ... +func (msc *MockScannerController) SetRegistrationByProject(projectID int64, scannerID string) error { + args := msc.Called(projectID, scannerID) + + return args.Error(0) +} + +// GetRegistrationByProject ... 
+func (msc *MockScannerController) GetRegistrationByProject(projectID int64) (*scanner.Registration, error) { + args := msc.Called(projectID) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*scanner.Registration), args.Error(1) +} + +// Ping ... +func (msc *MockScannerController) Ping(registration *scanner.Registration) (*v1.ScannerAdapterMetadata, error) { + args := msc.Called(registration) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*v1.ScannerAdapterMetadata), args.Error(1) +} + +// GetMetadata ... +func (msc *MockScannerController) GetMetadata(registrationUUID string) (*v1.ScannerAdapterMetadata, error) { + args := msc.Called(registrationUUID) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*v1.ScannerAdapterMetadata), args.Error(1) +} + +// MockJobServiceClient ... +type MockJobServiceClient struct { + mock.Mock +} + +// SubmitJob ... +func (mjc *MockJobServiceClient) SubmitJob(jData *cjm.JobData) (string, error) { + args := mjc.Called(jData) + + return args.String(0), args.Error(1) +} + +// GetJobLog ... +func (mjc *MockJobServiceClient) GetJobLog(uuid string) ([]byte, error) { + args := mjc.Called(uuid) + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]byte), args.Error(1) +} + +// PostAction ... +func (mjc *MockJobServiceClient) PostAction(uuid, action string) error { + args := mjc.Called(uuid, action) + + return args.Error(0) +} + +func (mjc *MockJobServiceClient) GetExecutions(uuid string) ([]job.Stats, error) { + args := mjc.Called(uuid) + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]job.Stats), args.Error(1) +} + +// MockRobotController ... +type MockRobotController struct { + mock.Mock +} + +// GetRobotAccount ... 
+func (mrc *MockRobotController) GetRobotAccount(id int64) (*model.Robot, error) { + args := mrc.Called(id) + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*model.Robot), args.Error(1) +} + +// CreateRobotAccount ... +func (mrc *MockRobotController) CreateRobotAccount(robotReq *model.RobotCreate) (*model.Robot, error) { + args := mrc.Called(robotReq) + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*model.Robot), args.Error(1) +} + +// DeleteRobotAccount ... +func (mrc *MockRobotController) DeleteRobotAccount(id int64) error { + args := mrc.Called(id) + + return args.Error(0) +} + +// UpdateRobotAccount ... +func (mrc *MockRobotController) UpdateRobotAccount(r *model.Robot) error { + args := mrc.Called(r) + + return args.Error(0) +} + +// ListRobotAccount ... +func (mrc *MockRobotController) ListRobotAccount(query *q.Query) ([]*model.Robot, error) { + args := mrc.Called(query) + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]*model.Robot), args.Error(1) +} diff --git a/src/pkg/scan/api/scan/controller.go b/src/pkg/scan/api/scan/controller.go new file mode 100644 index 000000000..69752c403 --- /dev/null +++ b/src/pkg/scan/api/scan/controller.go @@ -0,0 +1,78 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scan + +import ( + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/scan/dao/scan" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" +) + +// Controller provides the related operations for triggering scan. +// TODO: Here the artifact object is reused the v1 one which is sent to the adapter, +// it should be pointed to the general artifact object in future once it's ready. +type Controller interface { + // Scan the given artifact + // + // Arguments: + // artifact *v1.Artifact : artifact to be scanned + // + // Returns: + // error : non nil error if any errors occurred + Scan(artifact *v1.Artifact) error + + // GetReport gets the reports for the given artifact identified by the digest + // + // Arguments: + // artifact *v1.Artifact : the scanned artifact + // mimeTypes []string : the mime types of the reports + // + // Returns: + // []*scan.Report : scan results by different scanner vendors + // error : non nil error if any errors occurred + GetReport(artifact *v1.Artifact, mimeTypes []string) ([]*scan.Report, error) + + // GetSummary gets the summaries of the reports with given types. 
+ // + // Arguments: + // artifact *v1.Artifact : the scanned artifact + // mimeTypes []string : the mime types of the reports + // + // Returns: + // map[string]interface{} : report summaries indexed by mime types + // error : non nil error if any errors occurred + GetSummary(artifact *v1.Artifact, mimeTypes []string) (map[string]interface{}, error) + + // Get the scan log for the specified artifact with the given digest + // + // Arguments: + // uuid string : the UUID of the scan report + // + // Returns: + // []byte : the log text stream + // error : non nil error if any errors occurred + GetScanLog(uuid string) ([]byte, error) + + // HandleJobHooks handle the hook events from the job service + // e.g : status change of the scan job or scan result + // + // Arguments: + // trackID string : UUID for the report record + // change *job.StatusChange : change event from the job service + // + // Returns: + // error : non nil error if any errors occurred + HandleJobHooks(trackID string, change *job.StatusChange) error +} diff --git a/src/pkg/scan/api/scanner/base_controller.go b/src/pkg/scan/api/scanner/base_controller.go new file mode 100644 index 000000000..2eb4688c4 --- /dev/null +++ b/src/pkg/scan/api/scanner/base_controller.go @@ -0,0 +1,304 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scanner + +import ( + "github.com/goharbor/harbor/src/core/promgr/metamgr" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + rscanner "github.com/goharbor/harbor/src/pkg/scan/scanner" + "github.com/pkg/errors" +) + +const ( + proScannerMetaKey = "projectScanner" +) + +// DefaultController is a singleton api controller for plug scanners +var DefaultController = New() + +// New a basic controller +func New() Controller { + return &basicController{ + manager: rscanner.New(), + proMetaMgr: metamgr.NewDefaultProjectMetadataManager(), + clientPool: v1.DefaultClientPool, + } +} + +// basicController is default implementation of api.Controller interface +type basicController struct { + // Managers for managing the scanner registrations + manager rscanner.Manager + // For operating the project level configured scanner + proMetaMgr metamgr.ProjectMetadataManager + // Client pool for talking to adapters + clientPool v1.ClientPool +} + +// ListRegistrations ... +func (bc *basicController) ListRegistrations(query *q.Query) ([]*scanner.Registration, error) { + l, err := bc.manager.List(query) + if err != nil { + return nil, errors.Wrap(err, "api controller: list registrations") + } + + for _, r := range l { + _, err = bc.Ping(r) + r.Health = err == nil + } + + return l, nil +} + +// CreateRegistration ... +func (bc *basicController) CreateRegistration(registration *scanner.Registration) (string, error) { + // TODO: Check connection of the registration. + // Check if there are any registrations already existing. 
+ l, err := bc.manager.List(&q.Query{ + PageSize: 1, + PageNumber: 1, + }) + if err != nil { + return "", errors.Wrap(err, "api controller: create registration") + } + + if len(l) == 0 && !registration.IsDefault { + // Mark the 1st as default automatically + registration.IsDefault = true + } + + return bc.manager.Create(registration) +} + +// GetRegistration ... +func (bc *basicController) GetRegistration(registrationUUID string) (*scanner.Registration, error) { + r, err := bc.manager.Get(registrationUUID) + if err != nil { + return nil, err + } + + _, err = bc.Ping(r) + r.Health = err == nil + + return r, nil +} + +// RegistrationExists ... +func (bc *basicController) RegistrationExists(registrationUUID string) bool { + registration, err := bc.manager.Get(registrationUUID) + + // Just logged when an error occurred + if err != nil { + logger.Errorf("Check existence of registration error: %s", err) + } + + return !(err == nil && registration == nil) +} + +// UpdateRegistration ... +func (bc *basicController) UpdateRegistration(registration *scanner.Registration) error { + if registration.IsDefault && registration.Disabled { + return errors.Errorf("default registration %s can not be marked to disabled", registration.UUID) + } + + return bc.manager.Update(registration) +} + +// SetDefaultRegistration ... +func (bc *basicController) DeleteRegistration(registrationUUID string) (*scanner.Registration, error) { + registration, err := bc.manager.Get(registrationUUID) + if registration == nil && err == nil { + // Not found + return nil, nil + } + + if err := bc.manager.Delete(registrationUUID); err != nil { + return nil, errors.Wrap(err, "api controller: delete registration") + } + + return registration, nil +} + +// SetDefaultRegistration ... +func (bc *basicController) SetDefaultRegistration(registrationUUID string) error { + return bc.manager.SetAsDefault(registrationUUID) +} + +// SetRegistrationByProject ... 
+func (bc *basicController) SetRegistrationByProject(projectID int64, registrationID string) error { + if projectID == 0 { + return errors.New("invalid project ID") + } + + if len(registrationID) == 0 { + return errors.New("missing scanner UUID") + } + + // Only keep the UUID in the metadata of the given project + // Scanner metadata existing? + m, err := bc.proMetaMgr.Get(projectID, proScannerMetaKey) + if err != nil { + return errors.Wrap(err, "api controller: set project scanner") + } + + // Update if exists + if len(m) > 0 { + // Compare and set new + if registrationID != m[proScannerMetaKey] { + m[proScannerMetaKey] = registrationID + if err := bc.proMetaMgr.Update(projectID, m); err != nil { + return errors.Wrap(err, "api controller: set project scanner") + } + } + } else { + meta := make(map[string]string, 1) + meta[proScannerMetaKey] = registrationID + if err := bc.proMetaMgr.Add(projectID, meta); err != nil { + return errors.Wrap(err, "api controller: set project scanner") + } + } + + return nil +} + +// GetRegistrationByProject ... 
+func (bc *basicController) GetRegistrationByProject(projectID int64) (*scanner.Registration, error) { + if projectID == 0 { + return nil, errors.New("invalid project ID") + } + + // First, get it from the project metadata + m, err := bc.proMetaMgr.Get(projectID, proScannerMetaKey) + if err != nil { + return nil, errors.Wrap(err, "api controller: get project scanner") + } + + var registration *scanner.Registration + if len(m) > 0 { + if registrationID, ok := m[proScannerMetaKey]; ok && len(registrationID) > 0 { + registration, err = bc.manager.Get(registrationID) + if err != nil { + return nil, errors.Wrap(err, "api controller: get project scanner") + } + + if registration == nil { + // Not found + // Might be deleted by the admin, the project scanner ID reference should be cleared + if err := bc.proMetaMgr.Delete(projectID, proScannerMetaKey); err != nil { + return nil, errors.Wrap(err, "api controller: get project scanner") + } + } + } + } + + if registration == nil { + // Second, get the default one + registration, err = bc.manager.GetDefault() + if err != nil { + return nil, errors.Wrap(err, "api controller: get project scanner") + } + } + + // Check status by the client later + if registration != nil { + if meta, err := bc.Ping(registration); err == nil { + registration.Scanner = meta.Scanner.Name + registration.Vendor = meta.Scanner.Vendor + registration.Version = meta.Scanner.Version + registration.Health = true + } else { + registration.Health = false + } + } + + return registration, err +} + +// Ping ... 
+// TODO: ADD UT CASES +func (bc *basicController) Ping(registration *scanner.Registration) (*v1.ScannerAdapterMetadata, error) { + if registration == nil { + return nil, errors.New("nil registration to ping") + } + + client, err := bc.clientPool.Get(registration) + if err != nil { + return nil, errors.Wrap(err, "scanner controller: ping") + } + + meta, err := client.GetMetadata() + if err != nil { + return nil, errors.Wrap(err, "scanner controller: ping") + } + + // Validate the required properties + if meta.Scanner == nil || + len(meta.Scanner.Name) == 0 || + len(meta.Scanner.Version) == 0 || + len(meta.Scanner.Vendor) == 0 { + return nil, errors.New("invalid scanner in metadata") + } + + if len(meta.Capabilities) == 0 { + return nil, errors.New("invalid capabilities in metadata") + } + + for _, ca := range meta.Capabilities { + // v1.MimeTypeDockerArtifact is required now + found := false + for _, cm := range ca.ConsumesMimeTypes { + if cm == v1.MimeTypeDockerArtifact { + found = true + break + } + } + if !found { + return nil, errors.Errorf("missing %s in consumes_mime_types", v1.MimeTypeDockerArtifact) + } + + // v1.MimeTypeNativeReport is required + found = false + for _, pm := range ca.ProducesMimeTypes { + if pm == v1.MimeTypeNativeReport { + found = true + break + } + } + + if !found { + return nil, errors.Errorf("missing %s in produces_mime_types", v1.MimeTypeNativeReport) + } + } + + return meta, err +} + +// GetMetadata ... 
+// TODO: ADD UT CASES +func (bc *basicController) GetMetadata(registrationUUID string) (*v1.ScannerAdapterMetadata, error) { + if len(registrationUUID) == 0 { + return nil, errors.New("empty registration uuid") + } + + r, err := bc.manager.Get(registrationUUID) + if err != nil { + return nil, errors.Wrap(err, "scanner controller: get metadata") + } + + return bc.Ping(r) +} diff --git a/src/pkg/scan/api/scanner/base_controller_test.go b/src/pkg/scan/api/scanner/base_controller_test.go new file mode 100644 index 000000000..eef26baf7 --- /dev/null +++ b/src/pkg/scan/api/scanner/base_controller_test.go @@ -0,0 +1,360 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scanner + +import ( + "testing" + + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// ControllerTestSuite is test suite to test the basic api controller. 
+type ControllerTestSuite struct { + suite.Suite + + c *basicController + mMgr *MockScannerManager + mMeta *MockProMetaManager + + sample *scanner.Registration +} + +// TestController is the entry of controller test suite +func TestController(t *testing.T) { + suite.Run(t, new(ControllerTestSuite)) +} + +// SetupSuite prepares env for the controller test suite +func (suite *ControllerTestSuite) SetupSuite() { + suite.mMgr = new(MockScannerManager) + suite.mMeta = new(MockProMetaManager) + + m := &v1.ScannerAdapterMetadata{ + Scanner: &v1.Scanner{ + Name: "Clair", + Vendor: "Harbor", + Version: "0.1.0", + }, + Capabilities: []*v1.ScannerCapability{{ + ConsumesMimeTypes: []string{ + v1.MimeTypeOCIArtifact, + v1.MimeTypeDockerArtifact, + }, + ProducesMimeTypes: []string{ + v1.MimeTypeNativeReport, + v1.MimeTypeRawReport, + }, + }}, + Properties: v1.ScannerProperties{ + "extra": "testing", + }, + } + + suite.sample = &scanner.Registration{ + Name: "forUT", + Description: "sample registration", + URL: "https://sample.scanner.com", + } + + mc := &MockClient{} + mc.On("GetMetadata").Return(m, nil) + + mcp := &MockClientPool{} + mcp.On("Get", suite.sample).Return(mc, nil) + suite.c = &basicController{ + manager: suite.mMgr, + proMetaMgr: suite.mMeta, + clientPool: mcp, + } +} + +// Clear test case +func (suite *ControllerTestSuite) TearDownTest() { + suite.sample.UUID = "" +} + +// TestListRegistrations tests ListRegistrations +func (suite *ControllerTestSuite) TestListRegistrations() { + query := &q.Query{ + PageSize: 10, + PageNumber: 1, + } + + suite.sample.UUID = "uuid" + l := []*scanner.Registration{suite.sample} + + suite.mMgr.On("List", query).Return(l, nil) + + rl, err := suite.c.ListRegistrations(query) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(rl)) +} + +// TestCreateRegistration tests CreateRegistration +func (suite *ControllerTestSuite) TestCreateRegistration() { + suite.mMgr.On("Create", suite.sample).Return("uuid", nil) + + uid, err 
:= suite.mMgr.Create(suite.sample) + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), uid, "uuid") +} + +// TestGetRegistration tests GetRegistration +func (suite *ControllerTestSuite) TestGetRegistration() { + suite.sample.UUID = "uuid" + suite.mMgr.On("Get", "uuid").Return(suite.sample, nil) + + rr, err := suite.c.GetRegistration("uuid") + require.NoError(suite.T(), err) + assert.NotNil(suite.T(), rr) + assert.Equal(suite.T(), "forUT", rr.Name) +} + +// TestRegistrationExists tests RegistrationExists +func (suite *ControllerTestSuite) TestRegistrationExists() { + suite.sample.UUID = "uuid" + suite.mMgr.On("Get", "uuid").Return(suite.sample, nil) + + exists := suite.c.RegistrationExists("uuid") + assert.Equal(suite.T(), true, exists) + + suite.mMgr.On("Get", "uuid2").Return(nil, nil) + + exists = suite.c.RegistrationExists("uuid2") + assert.Equal(suite.T(), false, exists) +} + +// TestUpdateRegistration tests UpdateRegistration +func (suite *ControllerTestSuite) TestUpdateRegistration() { + suite.sample.UUID = "uuid" + suite.mMgr.On("Update", suite.sample).Return(nil) + + err := suite.c.UpdateRegistration(suite.sample) + require.NoError(suite.T(), err) +} + +// TestDeleteRegistration tests DeleteRegistration +func (suite *ControllerTestSuite) TestDeleteRegistration() { + suite.sample.UUID = "uuid" + suite.mMgr.On("Get", "uuid").Return(suite.sample, nil) + suite.mMgr.On("Delete", "uuid").Return(nil) + + r, err := suite.c.DeleteRegistration("uuid") + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + assert.Equal(suite.T(), "forUT", r.Name) +} + +// TestSetDefaultRegistration tests SetDefaultRegistration +func (suite *ControllerTestSuite) TestSetDefaultRegistration() { + suite.mMgr.On("SetAsDefault", "uuid").Return(nil) + + err := suite.c.SetDefaultRegistration("uuid") + require.NoError(suite.T(), err) +} + +// TestSetRegistrationByProject tests SetRegistrationByProject +func (suite *ControllerTestSuite) TestSetRegistrationByProject() { + m 
:= make(map[string]string, 1) + mm := make(map[string]string, 1) + mmm := make(map[string]string, 1) + mm[proScannerMetaKey] = "uuid" + mmm[proScannerMetaKey] = "uuid2" + + var pid, pid2 int64 = 1, 2 + + // not set before + suite.mMeta.On("Get", pid, []string{proScannerMetaKey}).Return(m, nil) + suite.mMeta.On("Add", pid, mm).Return(nil) + + err := suite.c.SetRegistrationByProject(pid, "uuid") + require.NoError(suite.T(), err) + + // Set before + suite.mMeta.On("Get", pid2, []string{proScannerMetaKey}).Return(mm, nil) + suite.mMeta.On("Update", pid2, mmm).Return(nil) + + err = suite.c.SetRegistrationByProject(pid2, "uuid2") + require.NoError(suite.T(), err) +} + +// TestGetRegistrationByProject tests GetRegistrationByProject +func (suite *ControllerTestSuite) TestGetRegistrationByProject() { + m := make(map[string]string, 1) + m[proScannerMetaKey] = "uuid" + + // Configured at project level + var pid int64 = 1 + suite.sample.UUID = "uuid" + + suite.mMeta.On("Get", pid, []string{proScannerMetaKey}).Return(m, nil) + suite.mMgr.On("Get", "uuid").Return(suite.sample, nil) + + r, err := suite.c.GetRegistrationByProject(pid) + require.NoError(suite.T(), err) + require.Equal(suite.T(), "forUT", r.Name) + + // Not configured at project level, return system default + suite.mMeta.On("Get", pid, []string{proScannerMetaKey}).Return(nil, nil) + suite.mMgr.On("GetDefault").Return(suite.sample, nil) + + r, err = suite.c.GetRegistrationByProject(pid) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + assert.Equal(suite.T(), "forUT", r.Name) +} + +// MockScannerManager is mock of the scanner manager +type MockScannerManager struct { + mock.Mock +} + +// List ... +func (m *MockScannerManager) List(query *q.Query) ([]*scanner.Registration, error) { + args := m.Called(query) + return args.Get(0).([]*scanner.Registration), args.Error(1) +} + +// Create ... 
+func (m *MockScannerManager) Create(registration *scanner.Registration) (string, error) { + args := m.Called(registration) + return args.String(0), args.Error(1) +} + +// Get ... +func (m *MockScannerManager) Get(registrationUUID string) (*scanner.Registration, error) { + args := m.Called(registrationUUID) + r := args.Get(0) + if r == nil { + return nil, args.Error(1) + } + + return r.(*scanner.Registration), args.Error(1) +} + +// Update ... +func (m *MockScannerManager) Update(registration *scanner.Registration) error { + args := m.Called(registration) + return args.Error(0) +} + +// Delete ... +func (m *MockScannerManager) Delete(registrationUUID string) error { + args := m.Called(registrationUUID) + return args.Error(0) +} + +// SetAsDefault ... +func (m *MockScannerManager) SetAsDefault(registrationUUID string) error { + args := m.Called(registrationUUID) + return args.Error(0) +} + +// GetDefault ... +func (m *MockScannerManager) GetDefault() (*scanner.Registration, error) { + args := m.Called() + return args.Get(0).(*scanner.Registration), args.Error(1) +} + +// MockProMetaManager is the mock of the ProjectMetadataManager +type MockProMetaManager struct { + mock.Mock +} + +// Add ... +func (m *MockProMetaManager) Add(projectID int64, meta map[string]string) error { + args := m.Called(projectID, meta) + return args.Error(0) +} + +// Delete ... +func (m *MockProMetaManager) Delete(projecdtID int64, meta ...string) error { + args := m.Called(projecdtID, meta) + return args.Error(0) +} + +// Update ... +func (m *MockProMetaManager) Update(projectID int64, meta map[string]string) error { + args := m.Called(projectID, meta) + return args.Error(0) +} + +// Get ... +func (m *MockProMetaManager) Get(projectID int64, meta ...string) (map[string]string, error) { + args := m.Called(projectID, meta) + return args.Get(0).(map[string]string), args.Error(1) +} + +// List ... 
+func (m *MockProMetaManager) List(name, value string) ([]*models.ProjectMetadata, error) { + args := m.Called(name, value) + return args.Get(0).([]*models.ProjectMetadata), args.Error(1) +} + +// MockClientPool is defined and referred by other UT cases. +type MockClientPool struct { + mock.Mock +} + +// Get client +func (mcp *MockClientPool) Get(r *scanner.Registration) (v1.Client, error) { + args := mcp.Called(r) + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(v1.Client), args.Error(1) +} + +// MockClient is defined and referred in other UT cases. +type MockClient struct { + mock.Mock +} + +// GetMetadata ... +func (mc *MockClient) GetMetadata() (*v1.ScannerAdapterMetadata, error) { + args := mc.Called() + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*v1.ScannerAdapterMetadata), args.Error(1) +} + +// SubmitScan ... +func (mc *MockClient) SubmitScan(req *v1.ScanRequest) (*v1.ScanResponse, error) { + args := mc.Called(req) + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*v1.ScanResponse), args.Error(1) +} + +// GetScanReport ... +func (mc *MockClient) GetScanReport(scanRequestID, reportMIMEType string) (string, error) { + args := mc.Called(scanRequestID, reportMIMEType) + + return args.String(0), args.Error(1) +} diff --git a/src/pkg/scan/api/scanner/controller.go b/src/pkg/scan/api/scanner/controller.go new file mode 100644 index 000000000..d87928ca0 --- /dev/null +++ b/src/pkg/scan/api/scanner/controller.go @@ -0,0 +1,138 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scanner + +import ( + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" +) + +// Controller provides the related operations of scanner for the upper API. +// All the capabilities of the scanner are defined here. +type Controller interface { + // ListRegistrations returns a list of currently configured scanner registrations. + // Query parameters are optional + // + // Arguments: + // query *q.Query : query parameters + // + // Returns: + // []*scanner.Registration : scanner list of all the matched ones + // error : non nil error if any errors occurred + ListRegistrations(query *q.Query) ([]*scanner.Registration, error) + + // CreateRegistration creates a new scanner registration with the given data. + // Returns the scanner registration identifier. + // + // Arguments: + // registration *scanner.Registration : scanner registration to create + // + // Returns: + // string : the generated UUID of the new scanner + // error : non nil error if any errors occurred + CreateRegistration(registration *scanner.Registration) (string, error) + + // GetRegistration returns the details of the specified scanner registration. 
+ // + // Arguments: + // registrationUUID string : the UUID of the given scanner + // + // Returns: + // *scanner.Registration : the required scanner + // error : non nil error if any errors occurred + GetRegistration(registrationUUID string) (*scanner.Registration, error) + + // RegistrationExists checks if the provided registration is there. + // + // Arguments: + // registrationUUID string : the UUID of the given scanner + // + // Returns: + // true for existing or false for not existing + RegistrationExists(registrationUUID string) bool + + // UpdateRegistration updates the specified scanner registration. + // + // Arguments: + // registration *scanner.Registration : scanner registration to update + // + // Returns: + // error : non nil error if any errors occurred + UpdateRegistration(registration *scanner.Registration) error + + // DeleteRegistration deletes the specified scanner registration. + // + // Arguments: + // registrationUUID string : the UUID of the given scanner which is going to be deleted + // + // Returns: + // *scanner.Registration : the deleted scanner + // error : non nil error if any errors occurred + DeleteRegistration(registrationUUID string) (*scanner.Registration, error) + + // SetDefaultRegistration marks the specified scanner registration as default. + // The implementation is supposed to unset any registration previously set as default. + // + // Arguments: + // registrationUUID string : the UUID of the given scanner which is marked as default + // + // Returns: + // error : non nil error if any errors occurred + SetDefaultRegistration(registrationUUID string) error + + // SetRegistrationByProject sets scanner for the given project. 
+ // + // Arguments: + // projectID int64 : the ID of the given project + // scannerID string : the UUID of the the scanner + // + // Returns: + // error : non nil error if any errors occurred + SetRegistrationByProject(projectID int64, scannerID string) error + + // GetRegistrationByProject returns the configured scanner registration of the given project or + // the system default registration if exists or `nil` if no system registrations set. + // + // Arguments: + // projectID int64 : the ID of the given project + // + // Returns: + // *scanner.Registration : the default scanner registration + // error : non nil error if any errors occurred + GetRegistrationByProject(projectID int64) (*scanner.Registration, error) + + // Ping pings Scanner Adapter to test EndpointURL and Authorization settings. + // The implementation is supposed to call the GetMetadata method on scanner.Client. + // Returns `nil` if connection succeeded, a non `nil` error otherwise. + // + // Arguments: + // registration *scanner.Registration : scanner registration to ping + // + // Returns: + // *v1.ScannerAdapterMetadata : metadata returned by the scanner if successfully ping + // error : non nil error if any errors occurred + Ping(registration *scanner.Registration) (*v1.ScannerAdapterMetadata, error) + + // GetMetadata returns the metadata of the given scanner. 
+ // + // Arguments: + // registrationUUID string : the UUID of the given scanner which is marked as default + // + // Returns: + // *v1.ScannerAdapterMetadata : metadata returned by the scanner if successfully ping + // error : non nil error if any errors occurred + GetMetadata(registrationUUID string) (*v1.ScannerAdapterMetadata, error) +} diff --git a/src/pkg/scan/dao/scan/model.go b/src/pkg/scan/dao/scan/model.go new file mode 100644 index 000000000..9d7f3ff4a --- /dev/null +++ b/src/pkg/scan/dao/scan/model.go @@ -0,0 +1,48 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scan + +import "time" + +// Report of the scan. +// Identified by the `digest`, `registration_uuid` and `mime_type`. 
+type Report struct { + ID int64 `orm:"pk;auto;column(id)"` + UUID string `orm:"unique;column(uuid)"` + Digest string `orm:"column(digest)"` + RegistrationUUID string `orm:"column(registration_uuid)"` + MimeType string `orm:"column(mime_type)"` + JobID string `orm:"column(job_id)"` + TrackID string `orm:"column(track_id)"` + Status string `orm:"column(status)"` + StatusCode int `orm:"column(status_code)"` + StatusRevision int64 `orm:"column(status_rev)"` + Report string `orm:"column(report);type(json)"` + StartTime time.Time `orm:"column(start_time);auto_now_add;type(datetime)"` + EndTime time.Time `orm:"column(end_time);type(datetime)"` +} + +// TableName for Report +func (r *Report) TableName() string { + return "scan_report" +} + +// TableUnique for Report +func (r *Report) TableUnique() [][]string { + return [][]string{ + {"uuid"}, + {"digest", "registration_uuid", "mime_type"}, + } +} diff --git a/src/pkg/scan/dao/scan/report.go b/src/pkg/scan/dao/scan/report.go new file mode 100644 index 000000000..6b428f8a2 --- /dev/null +++ b/src/pkg/scan/dao/scan/report.go @@ -0,0 +1,147 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scan + +import ( + "fmt" + "time" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/pkg/errors" +) + +func init() { + orm.RegisterModel(new(Report)) +} + +// CreateReport creates new report +func CreateReport(r *Report) (int64, error) { + o := dao.GetOrmer() + return o.Insert(r) +} + +// DeleteReport deletes the given report +func DeleteReport(uuid string) error { + o := dao.GetOrmer() + qt := o.QueryTable(new(Report)) + + // Delete report with query way + count, err := qt.Filter("uuid", uuid).Delete() + if err != nil { + return err + } + + if count == 0 { + return errors.Errorf("no report with uuid %s deleted", uuid) + } + + return nil +} + +// ListReports lists the reports with given query parameters. +// Keywords in query here will be enforced with `exact` way. +func ListReports(query *q.Query) ([]*Report, error) { + o := dao.GetOrmer() + qt := o.QueryTable(new(Report)) + + if query != nil { + if len(query.Keywords) > 0 { + for k, v := range query.Keywords { + if vv, ok := v.([]interface{}); ok { + qt = qt.Filter(fmt.Sprintf("%s__in", k), vv...) + } + + qt = qt.Filter(k, v) + } + } + + if query.PageNumber > 0 && query.PageSize > 0 { + qt = qt.Limit(query.PageSize, (query.PageNumber-1)*query.PageSize) + } + } + + l := make([]*Report, 0) + _, err := qt.All(&l) + + return l, err +} + +// UpdateReportData only updates the `report` column with conditions matched. +func UpdateReportData(uuid string, report string, statusRev int64) error { + o := dao.GetOrmer() + qt := o.QueryTable(new(Report)) + + data := make(orm.Params) + data["report"] = report + data["status_rev"] = statusRev + + count, err := qt.Filter("uuid", uuid). 
+ Filter("status_rev__lte", statusRev).Update(data) + + if err != nil { + return err + } + + if count == 0 { + return errors.Errorf("no report with uuid %s updated", uuid) + } + + return nil +} + +// UpdateReportStatus updates the report `status` with conditions matched. +func UpdateReportStatus(trackID string, status string, statusCode int, statusRev int64) error { + o := dao.GetOrmer() + qt := o.QueryTable(new(Report)) + + data := make(orm.Params) + data["status"] = status + data["status_code"] = statusCode + data["status_rev"] = statusRev + + // Technically it is not correct, just to avoid changing interface and adding more code. + // running==2 + if statusCode > 2 { + data["end_time"] = time.Now().UTC() + } + + count, err := qt.Filter("track_id", trackID). + Filter("status_rev__lte", statusRev). + Filter("status_code__lte", statusCode).Update(data) + + if err != nil { + return err + } + + if count == 0 { + return errors.Errorf("no report with track_id %s updated", trackID) + } + + return nil +} + +// UpdateJobID updates the report `job_id` column +func UpdateJobID(trackID string, jobID string) error { + o := dao.GetOrmer() + qt := o.QueryTable(new(Report)) + + params := make(orm.Params, 1) + params["job_id"] = jobID + _, err := qt.Filter("track_id", trackID).Update(params) + + return err +} diff --git a/src/pkg/scan/dao/scan/report_test.go b/src/pkg/scan/dao/scan/report_test.go new file mode 100644 index 000000000..f2193b9bc --- /dev/null +++ b/src/pkg/scan/dao/scan/report_test.go @@ -0,0 +1,132 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scan + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/q" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// ReportTestSuite is test suite of testing report DAO. +type ReportTestSuite struct { + suite.Suite +} + +// TestReport is the entry of ReportTestSuite. +func TestReport(t *testing.T) { + suite.Run(t, &ReportTestSuite{}) +} + +// SetupSuite prepares env for test suite. +func (suite *ReportTestSuite) SetupSuite() { + dao.PrepareTestForPostgresSQL() +} + +// SetupTest prepares env for test case. +func (suite *ReportTestSuite) SetupTest() { + r := &Report{ + UUID: "uuid", + TrackID: "track-uuid", + Digest: "digest1001", + RegistrationUUID: "ruuid", + MimeType: v1.MimeTypeNativeReport, + Status: job.PendingStatus.String(), + StatusCode: job.PendingStatus.Code(), + } + + id, err := CreateReport(r) + require.NoError(suite.T(), err) + require.Condition(suite.T(), func() (success bool) { + success = id > 0 + return + }) +} + +// TearDownTest clears enf for test case. +func (suite *ReportTestSuite) TearDownTest() { + err := DeleteReport("uuid") + require.NoError(suite.T(), err) +} + +// TestReportList tests list reports with query parameters. 
+func (suite *ReportTestSuite) TestReportList() { + query1 := &q.Query{ + PageSize: 1, + PageNumber: 1, + Keywords: map[string]interface{}{ + "digest": "digest1001", + "registration_uuid": "ruuid", + "mime_type": v1.MimeTypeNativeReport, + }, + } + l, err := ListReports(query1) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + query2 := &q.Query{ + PageSize: 1, + PageNumber: 1, + Keywords: map[string]interface{}{ + "digest": "digest1002", + }, + } + l, err = ListReports(query2) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 0, len(l)) +} + +// TestReportUpdateJobID tests update job ID of the report. +func (suite *ReportTestSuite) TestReportUpdateJobID() { + err := UpdateJobID("track-uuid", "jobid001") + require.NoError(suite.T(), err) + + l, err := ListReports(nil) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + assert.Equal(suite.T(), "jobid001", l[0].JobID) +} + +// TestReportUpdateReportData tests update the report data. +func (suite *ReportTestSuite) TestReportUpdateReportData() { + err := UpdateReportData("uuid", "{}", 1000) + require.NoError(suite.T(), err) + + l, err := ListReports(nil) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + assert.Equal(suite.T(), "{}", l[0].Report) + + err = UpdateReportData("uuid", "{\"a\": 900}", 900) + require.Error(suite.T(), err) +} + +// TestReportUpdateStatus tests update the report status. 
+func (suite *ReportTestSuite) TestReportUpdateStatus() { + err := UpdateReportStatus("track-uuid", job.RunningStatus.String(), job.RunningStatus.Code(), 1000) + require.NoError(suite.T(), err) + + err = UpdateReportStatus("track-uuid", job.RunningStatus.String(), job.RunningStatus.Code(), 900) + require.Error(suite.T(), err) + + err = UpdateReportStatus("track-uuid", job.PendingStatus.String(), job.PendingStatus.Code(), 1000) + require.Error(suite.T(), err) +} diff --git a/src/pkg/scan/dao/scanner/model.go b/src/pkg/scan/dao/scanner/model.go new file mode 100644 index 000000000..8186aa007 --- /dev/null +++ b/src/pkg/scan/dao/scanner/model.go @@ -0,0 +1,127 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scanner + +import ( + "encoding/json" + "net/url" + "strings" + "time" + + "github.com/goharbor/harbor/src/pkg/scan/rest/auth" + + "github.com/pkg/errors" +) + +// Registration represents a named configuration for invoking a scanner via its adapter. 
// UUID will be used to track the scanner.Endpoint as unique ID
type Registration struct {
	// Basic information
	// int64 ID is kept for being aligned with previous DB schema
	ID          int64  `orm:"pk;auto;column(id)" json:"-"`
	UUID        string `orm:"unique;column(uuid)" json:"uuid"`
	Name        string `orm:"unique;column(name);size(128)" json:"name"`
	Description string `orm:"column(description);null;size(1024)" json:"description"`
	URL         string `orm:"column(url);unique;size(512)" json:"url"`
	Disabled    bool   `orm:"column(disabled);default(true)" json:"disabled"`
	IsDefault   bool   `orm:"column(is_default);default(false)" json:"is_default"`
	Health      bool   `orm:"-" json:"health"`

	// Authentication settings
	// "None","Basic" and "Bearer" can be supported
	// NOTE(review): Validate below also accepts auth.APIKey — this comment
	// looks stale; confirm the intended supported set.
	Auth             string `orm:"column(auth);size(16)" json:"auth"`
	AccessCredential string `orm:"column(access_cred);null;size(512)" json:"access_credential,omitempty"`

	// Http connection settings
	SkipCertVerify bool `orm:"column(skip_cert_verify);default(false)" json:"skip_certVerify"`

	// Extra info about the scanner; not persisted (orm:"-"), filled at runtime.
	Scanner string `orm:"-" json:"scanner,omitempty"`
	Vendor  string `orm:"-" json:"vendor,omitempty"`
	Version string `orm:"-" json:"version,omitempty"`

	// Timestamps maintained automatically by the ORM.
	CreateTime time.Time `orm:"column(create_time);auto_now_add;type(datetime)" json:"create_time"`
	UpdateTime time.Time `orm:"column(update_time);auto_now;type(datetime)" json:"update_time"`
}

// TableName for Endpoint
func (r *Registration) TableName() string {
	return "scanner_registration"
}

// FromJSON parses registration from json data
func (r *Registration) FromJSON(jsonData string) error {
	if len(jsonData) == 0 {
		return errors.New("empty json data to parse")
	}

	return json.Unmarshal([]byte(jsonData), r)
}

// ToJSON marshals registration to JSON data
func (r *Registration) ToJSON() (string, error) {
	data, err := json.Marshal(r)
	if err != nil {
		return "", err
	}

	return string(data), nil
}

// Validate checks the registration for required fields and supported settings.
// When checkUUID is true, a non-empty UUID is also required.
func (r *Registration) Validate(checkUUID bool) error {
	if checkUUID && len(r.UUID) == 0 {
		return errors.New("malformed endpoint")
	}

	if len(r.Name) == 0 {
		return errors.New("missing registration name")
	}

	err := checkURL(r.URL)
	if err != nil {
		return errors.Wrap(err, "scanner registration validate")
	}

	// An empty Auth means "None"; otherwise only the known auth styles pass.
	if len(r.Auth) > 0 &&
		r.Auth != auth.Basic &&
		r.Auth != auth.Bearer &&
		r.Auth != auth.APIKey {
		return errors.Errorf("auth type %s is not supported", r.Auth)
	}

	// Any non-None auth style requires a credential to be supplied.
	if len(r.Auth) > 0 && len(r.AccessCredential) == 0 {
		return errors.Errorf("access_credential is required for auth type %s", r.Auth)
	}

	return nil
}

// checkURL validates the registration URL with the url package:
// it must be non-blank, parseable, and use the http or https scheme.
func checkURL(u string) error {
	if len(strings.TrimSpace(u)) == 0 {
		return errors.New("empty url")
	}

	uri, err := url.Parse(u)
	if err == nil {
		if uri.Scheme != "http" && uri.Scheme != "https" {
			err = errors.New("invalid scheme")
		}
	}

	return err
}
package scanner

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// ModelTestSuite tests the utility functions of the model
type ModelTestSuite struct {
	suite.Suite
}

// TestModel is the entry of the model test suite
func TestModel(t *testing.T) {
	suite.Run(t, new(ModelTestSuite))
}

// TestJSON tests the marshal and unmarshal functions
func (suite *ModelTestSuite) TestJSON() {
	r := &Registration{
		Name:        "forUT",
		Description: "sample registration",
		URL:         "https://sample.scanner.com",
	}

	json, err := r.ToJSON()
	require.NoError(suite.T(), err)
	assert.Condition(suite.T(), func() (success bool) {
		success = len(json) > 0
		return
	})

	// Round-trip: unmarshalling the produced JSON must restore the fields.
	r2 := &Registration{}
	err = r2.FromJSON(json)
	require.NoError(suite.T(), err)
	assert.Equal(suite.T(), "forUT", r2.Name)
}

// TestValidate tests the validate function, adding required fields one at a
// time and expecting an error until the registration is complete.
func (suite *ModelTestSuite) TestValidate() {
	r := &Registration{}

	// Missing UUID
	err := r.Validate(true)
	require.Error(suite.T(), err)

	// Missing name
	r.UUID = "uuid"
	err = r.Validate(true)
	require.Error(suite.T(), err)

	// Missing URL
	r.Name = "forUT"
	err = r.Validate(true)
	require.Error(suite.T(), err)

	// URL without an http(s) scheme is rejected
	r.URL = "a.b.c"
	err = r.Validate(true)
	require.Error(suite.T(), err)

	// Fully populated registration passes
	r.URL = "http://a.b.c"
	err = r.Validate(true)
	require.NoError(suite.T(), err)

	// NOTE(review): this repeats the previous assertion verbatim and adds no
	// coverage — presumably meant to exercise Validate(false); confirm intent.
	err = r.Validate(true)
	require.NoError(suite.T(), err)
}
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scanner

import (
	"fmt"
	"strings"

	"github.com/astaxie/beego/orm"
	"github.com/goharbor/harbor/src/common/dao"
	"github.com/goharbor/harbor/src/pkg/q"
	"github.com/pkg/errors"
)

func init() {
	// Register the Registration model with the ORM layer.
	orm.RegisterModel(new(Registration))
}

// AddRegistration adds a new registration and returns the generated primary key.
func AddRegistration(r *Registration) (int64, error) {
	o := dao.GetOrmer()
	return o.Insert(r)
}

// GetRegistration gets the specified registration.
// (nil, nil) is returned when no registration matches the UUID.
func GetRegistration(UUID string) (*Registration, error) {
	e := &Registration{}

	o := dao.GetOrmer()
	qs := o.QueryTable(new(Registration))

	if err := qs.Filter("uuid", UUID).One(e); err != nil {
		if err == orm.ErrNoRows {
			// Not existing case
			return nil, nil
		}
		return nil, err
	}

	return e, nil
}

// UpdateRegistration update the specified registration; cols optionally
// restricts the update to the named columns. An error is returned when no
// row was affected.
func UpdateRegistration(r *Registration, cols ...string) error {
	o := dao.GetOrmer()
	count, err := o.Update(r, cols...)
	if err != nil {
		return err
	}

	if count == 0 {
		return errors.Errorf("no item with UUID %s is updated", r.UUID)
	}

	return nil
}

// DeleteRegistration deletes the registration with the specified UUID.
// An error is returned when no row matched.
func DeleteRegistration(UUID string) error {
	o := dao.GetOrmer()
	qt := o.QueryTable(new(Registration))

	// delete with query way
	count, err := qt.Filter("uuid", UUID).Delete()

	if err != nil {
		return err
	}

	if count == 0 {
		return errors.Errorf("no item with UUID %s is deleted", UUID)
	}

	return nil
}

// ListRegistrations lists all the existing registrations.
// Keywords prefixed with "ex_" are matched exactly; all others are matched
// with a case-insensitive contains filter.
func ListRegistrations(query *q.Query) ([]*Registration, error) {
	o := dao.GetOrmer()
	qt := o.QueryTable(new(Registration))

	if query != nil {
		if len(query.Keywords) > 0 {
			for k, v := range query.Keywords {
				if strings.HasPrefix(k, "ex_") {
					kk := strings.TrimPrefix(k, "ex_")
					qt = qt.Filter(kk, v)
					continue
				}

				qt = qt.Filter(fmt.Sprintf("%s__icontains", k), v)
			}
		}

		// Pagination is applied only when both page number and size are positive.
		if query.PageNumber > 0 && query.PageSize > 0 {
			qt = qt.Limit(query.PageSize, (query.PageNumber-1)*query.PageSize)
		}
	}

	l := make([]*Registration, 0)
	_, err := qt.All(&l)

	return l, err
}

// SetDefaultRegistration sets the specified registration as default one.
// Done in a transaction: first flag the target (which must not be disabled)
// as default, then clear the default flag on every other registration.
// The whole operation is rolled back on any failure.
func SetDefaultRegistration(UUID string) error {
	o := dao.GetOrmer()
	err := o.Begin()
	if err != nil {
		return err
	}

	var count int64
	qt := o.QueryTable(new(Registration))
	count, err = qt.Filter("uuid", UUID).
		Filter("disabled", false).
		Update(orm.Params{
			"is_default": true,
		})
	// Zero affected rows means the target is missing or disabled.
	if err == nil && count == 0 {
		err = errors.Errorf("set default for %s failed", UUID)
	}

	if err == nil {
		// Clear the default flag everywhere else.
		qt2 := o.QueryTable(new(Registration))
		_, err = qt2.Exclude("uuid__exact", UUID).
			Filter("is_default", true).
			Update(orm.Params{
				"is_default": false,
			})
	}

	if err != nil {
		if e := o.Rollback(); e != nil {
			// Preserve both the original error and the rollback failure.
			err = errors.Wrap(e, err.Error())
		}
	} else {
		err = o.Commit()
	}

	return err
}

// GetDefaultRegistration gets the default registration.
// (nil, nil) is returned when no registration is flagged as default.
func GetDefaultRegistration() (*Registration, error) {
	o := dao.GetOrmer()
	qt := o.QueryTable(new(Registration))

	e := &Registration{}
	if err := qt.Filter("is_default", true).One(e); err != nil {
		if err == orm.ErrNoRows {
			return nil, nil
		}

		return nil, err
	}

	return e, nil
}
+ +package scanner + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// RegistrationDAOTestSuite is test suite of testing registration DAO +type RegistrationDAOTestSuite struct { + suite.Suite + + registrationID string +} + +// TestRegistrationDAO is entry of test cases +func TestRegistrationDAO(t *testing.T) { + suite.Run(t, new(RegistrationDAOTestSuite)) +} + +// SetupSuite prepare testing env for the suite +func (suite *RegistrationDAOTestSuite) SetupSuite() { + dao.PrepareTestForPostgresSQL() +} + +// SetupTest prepare stuff for test cases +func (suite *RegistrationDAOTestSuite) SetupTest() { + suite.registrationID = uuid.New().String() + r := &Registration{ + UUID: suite.registrationID, + Name: "forUT", + Description: "sample registration", + URL: "https://sample.scanner.com", + } + + _, err := AddRegistration(r) + require.NoError(suite.T(), err, "add new registration") + +} + +// TearDownTest clears all the stuff of test cases +func (suite *RegistrationDAOTestSuite) TearDownTest() { + err := DeleteRegistration(suite.registrationID) + require.NoError(suite.T(), err, "clear registration") +} + +// TestGet tests get registration +func (suite *RegistrationDAOTestSuite) TestGet() { + // Found + r, err := GetRegistration(suite.registrationID) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + assert.Equal(suite.T(), r.Name, "forUT") + + // Not found + re, err := GetRegistration("not_found") + require.NoError(suite.T(), err) + require.Nil(suite.T(), re) +} + +// TestUpdate tests update registration +func (suite *RegistrationDAOTestSuite) TestUpdate() { + r, err := GetRegistration(suite.registrationID) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + + r.Disabled = true + r.IsDefault = true + r.URL = 
"http://updated.registration.com" + + err = UpdateRegistration(r) + require.NoError(suite.T(), err, "update registration") + + r, err = GetRegistration(suite.registrationID) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + + assert.Equal(suite.T(), true, r.Disabled) + assert.Equal(suite.T(), true, r.IsDefault) + assert.Equal(suite.T(), "http://updated.registration.com", r.URL) +} + +// TestList tests list registrations +func (suite *RegistrationDAOTestSuite) TestList() { + // no query + l, err := ListRegistrations(nil) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + // with query and found items + keywords := make(map[string]interface{}) + keywords["description"] = "sample" + l, err = ListRegistrations(&q.Query{ + PageSize: 5, + PageNumber: 1, + Keywords: keywords, + }) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + // With query and not found items + keywords["description"] = "not_exist" + l, err = ListRegistrations(&q.Query{ + Keywords: keywords, + }) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 0, len(l)) + + // Exact match + exactKeywords := make(map[string]interface{}) + exactKeywords["ex_name"] = "forUT" + l, err = ListRegistrations(&q.Query{ + Keywords: exactKeywords, + }) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + exactKeywords["ex_name"] = "forU" + l, err = ListRegistrations(&q.Query{ + Keywords: exactKeywords, + }) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 0, len(l)) +} + +// TestDefault tests set/get default +func (suite *RegistrationDAOTestSuite) TestDefault() { + dr, err := GetDefaultRegistration() + require.NoError(suite.T(), err, "not found") + require.Nil(suite.T(), dr) + + err = SetDefaultRegistration(suite.registrationID) + require.NoError(suite.T(), err) + + dr, err = GetDefaultRegistration() + require.NoError(suite.T(), err) + require.NotNil(suite.T(), dr) + + dr.Disabled = true + err = 
UpdateRegistration(dr, "disabled") + require.NoError(suite.T(), err) + + err = SetDefaultRegistration(suite.registrationID) + require.Error(suite.T(), err) +} diff --git a/src/pkg/scan/job.go b/src/pkg/scan/job.go new file mode 100644 index 000000000..f8d10c104 --- /dev/null +++ b/src/pkg/scan/job.go @@ -0,0 +1,351 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scan + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sync" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/goharbor/harbor/src/pkg/scan/report" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/pkg/errors" +) + +const ( + // JobParamRegistration ... + JobParamRegistration = "registration" + // JobParameterRequest ... + JobParameterRequest = "scanRequest" + // JobParameterMimes ... + JobParameterMimes = "mimeTypes" + + checkTimeout = 30 * time.Minute + firstCheckInterval = 2 * time.Second +) + +// CheckInReport defines model for checking in the scan report with specified mime. 
+type CheckInReport struct { + Digest string `json:"digest"` + RegistrationUUID string `json:"registration_uuid"` + MimeType string `json:"mime_type"` + RawReport string `json:"raw_report"` +} + +// FromJSON parse json to CheckInReport +func (cir *CheckInReport) FromJSON(jsonData string) error { + if len(jsonData) == 0 { + return errors.New("empty JSON data") + } + + return json.Unmarshal([]byte(jsonData), cir) +} + +// ToJSON marshal CheckInReport to JSON +func (cir *CheckInReport) ToJSON() (string, error) { + jsonData, err := json.Marshal(cir) + if err != nil { + return "", errors.Wrap(err, "To JSON: CheckInReport") + } + + return string(jsonData), nil +} + +// Job for running scan in the job service with async way +type Job struct{} + +// MaxFails for defining the number of retries +func (j *Job) MaxFails() uint { + return 3 +} + +// ShouldRetry indicates if the job should be retried +func (j *Job) ShouldRetry() bool { + return true +} + +// Validate the parameters of this job +func (j *Job) Validate(params job.Parameters) error { + if params == nil { + // Params are required + return errors.New("missing parameter of scan job") + } + + if _, err := extractRegistration(params); err != nil { + return errors.Wrap(err, "job validate") + } + + if _, err := extractScanReq(params); err != nil { + return errors.Wrap(err, "job validate") + } + + if _, err := extractMimeTypes(params); err != nil { + return errors.Wrap(err, "job validate") + } + + return nil +} + +// Run the job +func (j *Job) Run(ctx job.Context, params job.Parameters) error { + // Get logger + myLogger := ctx.GetLogger() + + // Ignore errors as they have been validated already + r, _ := extractRegistration(params) + req, _ := extractScanReq(params) + mimes, _ := extractMimeTypes(params) + + // Print related infos to log + printJSONParameter(JobParamRegistration, params[JobParamRegistration].(string), myLogger) + printJSONParameter(JobParameterRequest, removeAuthInfo(req), myLogger) + 
myLogger.Infof("Report mime types: %v\n", mimes) + + // Submit scan request to the scanner adapter + client, err := v1.DefaultClientPool.Get(r) + if err != nil { + return logAndWrapError(myLogger, err, "scan job: get client") + } + + resp, err := client.SubmitScan(req) + if err != nil { + return logAndWrapError(myLogger, err, "scan job: submit scan request") + } + + // For collecting errors + errs := make([]error, len(mimes)) + + // Concurrently retrieving report by different mime types + wg := &sync.WaitGroup{} + wg.Add(len(mimes)) + + for i, mt := range mimes { + go func(i int, m string) { + defer wg.Done() + + // Log info + myLogger.Infof("Get report for mime type: %s", m) + + // Loop check if the report is ready + tm := time.NewTimer(firstCheckInterval) + defer tm.Stop() + + for { + select { + case t := <-tm.C: + myLogger.Debugf("check scan report for mime %s at %s", m, t.Format("2006/01/02 15:04:05")) + + rawReport, err := client.GetScanReport(resp.ID, m) + if err != nil { + // Not ready yet + if notReadyErr, ok := err.(*v1.ReportNotReadyError); ok { + // Reset to the new check interval + tm.Reset(time.Duration(notReadyErr.RetryAfter) * time.Second) + myLogger.Infof("Report with mime type %s is not ready yet, retry after %d seconds", m, notReadyErr.RetryAfter) + + continue + } + + errs[i] = errors.Wrap(err, fmt.Sprintf("check scan report with mime type %s", m)) + return + } + + // Make sure the data is aligned with the v1 spec. + if _, err = report.ResolveData(m, []byte(rawReport)); err != nil { + errs[i] = errors.Wrap(err, "scan job: resolve report data") + return + } + + // Check in + cir := &CheckInReport{ + Digest: req.Artifact.Digest, + RegistrationUUID: r.UUID, + MimeType: m, + RawReport: rawReport, + } + + var ( + jsonData string + er error + ) + if jsonData, er = cir.ToJSON(); er == nil { + if er = ctx.Checkin(jsonData); er == nil { + // Done! 
+ myLogger.Infof("Report with mime type %s is checked in", m) + return + } + } + + // Send error and exit + errs[i] = errors.Wrap(er, fmt.Sprintf("check in scan report for mime type %s", m)) + return + case <-ctx.SystemContext().Done(): + // Terminated by system + return + case <-time.After(checkTimeout): + errs[i] = errors.New("check scan report timeout") + return + } + } + }(i, mt) + } + + // Wait for all the retrieving routines are completed + wg.Wait() + + // Merge errors + for _, e := range errs { + if e != nil { + if err != nil { + err = errors.Wrap(e, err.Error()) + } else { + err = e + } + } + } + + // Log error to the job log + if err != nil { + myLogger.Error(err) + } + + return err +} + +func logAndWrapError(logger logger.Interface, err error, message string) error { + e := errors.Wrap(err, message) + logger.Error(e) + + return e +} + +func printJSONParameter(parameter string, v string, logger logger.Interface) { + logger.Infof("%s:\n", parameter) + printPrettyJSON([]byte(v), logger) +} + +func printPrettyJSON(in []byte, logger logger.Interface) { + var out bytes.Buffer + if err := json.Indent(&out, in, "", " "); err != nil { + logger.Errorf("Print pretty JSON error: %s", err) + return + } + + logger.Infof("%s\n", out.String()) +} + +func removeAuthInfo(sr *v1.ScanRequest) string { + req := &v1.ScanRequest{ + Artifact: sr.Artifact, + Registry: &v1.Registry{ + URL: sr.Registry.URL, + Authorization: "[HIDDEN]", + }, + } + + str, err := req.ToJSON() + if err != nil { + logger.Error(errors.Wrap(err, "scan job: remove auth")) + } + + return str +} + +func extractScanReq(params job.Parameters) (*v1.ScanRequest, error) { + v, ok := params[JobParameterRequest] + if !ok { + return nil, errors.Errorf("missing job parameter '%s'", JobParameterRequest) + } + + jsonData, ok := v.(string) + if !ok { + return nil, errors.Errorf( + "malformed job parameter '%s', expecting string but got %s", + JobParameterRequest, + reflect.TypeOf(v).String(), + ) + } + + req := 
&v1.ScanRequest{} + if err := req.FromJSON(jsonData); err != nil { + return nil, err + } + if err := req.Validate(); err != nil { + return nil, err + } + + return req, nil +} + +func extractRegistration(params job.Parameters) (*scanner.Registration, error) { + v, ok := params[JobParamRegistration] + if !ok { + return nil, errors.Errorf("missing job parameter '%s'", JobParamRegistration) + } + + jsonData, ok := v.(string) + if !ok { + return nil, errors.Errorf( + "malformed job parameter '%s', expecting string but got %s", + JobParamRegistration, + reflect.TypeOf(v).String(), + ) + } + + r := &scanner.Registration{} + if err := r.FromJSON(jsonData); err != nil { + return nil, err + } + + if err := r.Validate(true); err != nil { + return nil, err + } + + return r, nil +} + +func extractMimeTypes(params job.Parameters) ([]string, error) { + v, ok := params[JobParameterMimes] + if !ok { + return nil, errors.Errorf("missing job parameter '%s'", JobParameterMimes) + } + + l, ok := v.([]interface{}) + if !ok { + return nil, errors.Errorf( + "malformed job parameter '%s', expecting []interface{} but got %s", + JobParameterMimes, + reflect.TypeOf(v).String(), + ) + } + + mimes := make([]string, 0) + for _, v := range l { + mime, ok := v.(string) + if !ok { + return nil, errors.Errorf("expect string but got %s", reflect.TypeOf(v).String()) + } + + mimes = append(mimes, mime) + } + + return mimes, nil +} diff --git a/src/pkg/scan/job_test.go b/src/pkg/scan/job_test.go new file mode 100644 index 000000000..26af182b3 --- /dev/null +++ b/src/pkg/scan/job_test.go @@ -0,0 +1,306 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scan + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/goharbor/harbor/src/pkg/scan/vuln" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// JobTestSuite is a test suite to test the scan job. +type JobTestSuite struct { + suite.Suite + + defaultClientPool v1.ClientPool + mcp *MockClientPool +} + +// TestJob is the entry of JobTestSuite. +func TestJob(t *testing.T) { + suite.Run(t, &JobTestSuite{}) +} + +// SetupSuite sets up test env for JobTestSuite. +func (suite *JobTestSuite) SetupSuite() { + mcp := &MockClientPool{} + suite.defaultClientPool = v1.DefaultClientPool + v1.DefaultClientPool = mcp + + suite.mcp = mcp +} + +// TeraDownSuite clears test env for TeraDownSuite. 
+func (suite *JobTestSuite) TeraDownSuite() { + v1.DefaultClientPool = suite.defaultClientPool +} + +// TestJob tests the scan job +func (suite *JobTestSuite) TestJob() { + ctx := &MockJobContext{} + lg := &MockJobLogger{} + + ctx.On("GetLogger").Return(lg) + + r := &scanner.Registration{ + ID: 0, + UUID: "uuid", + Name: "TestJob", + URL: "https://clair.com:8080", + } + + rData, err := r.ToJSON() + require.NoError(suite.T(), err) + + sr := &v1.ScanRequest{ + Registry: &v1.Registry{ + URL: "http://localhost:5000", + Authorization: "the_token", + }, + Artifact: &v1.Artifact{ + Repository: "library/test_job", + Digest: "sha256:data", + MimeType: v1.MimeTypeDockerArtifact, + }, + } + + sData, err := sr.ToJSON() + require.NoError(suite.T(), err) + + mimeTypes := []string{v1.MimeTypeNativeReport} + + jp := make(job.Parameters) + jp[JobParamRegistration] = rData + jp[JobParameterRequest] = sData + jp[JobParameterMimes] = mimeTypes + + mc := &MockClient{} + sre := &v1.ScanResponse{ + ID: "scan_id", + } + mc.On("SubmitScan", sr).Return(sre, nil) + + rp := vuln.Report{ + GeneratedAt: time.Now().UTC().String(), + Scanner: &v1.Scanner{ + Name: "Clair", + Vendor: "Harbor", + Version: "0.1.0", + }, + Severity: vuln.High, + Vulnerabilities: []*vuln.VulnerabilityItem{ + { + ID: "2019-0980-0909", + Package: "dpkg", + Version: "0.9.1", + FixVersion: "0.9.2", + Severity: vuln.High, + Description: "mock one", + Links: []string{"https://vuln.com"}, + }, + }, + } + + jRep, err := json.Marshal(rp) + require.NoError(suite.T(), err) + + mc.On("GetScanReport", "scan_id", v1.MimeTypeNativeReport).Return(string(jRep), nil) + suite.mcp.On("Get", r).Return(mc, nil) + + crp := &CheckInReport{ + Digest: sr.Artifact.Digest, + RegistrationUUID: r.UUID, + MimeType: v1.MimeTypeNativeReport, + RawReport: string(jRep), + } + + jsonData, err := crp.ToJSON() + require.NoError(suite.T(), err) + + ctx.On("Checkin", string(jsonData)).Return(nil) + j := &Job{} + err = j.Run(ctx, jp) + 
require.NoError(suite.T(), err) +} + +// MockJobContext mocks job context interface. +// TODO: Maybe moved to a separate `mock` pkg for sharing in future. +type MockJobContext struct { + mock.Mock +} + +// Build ... +func (mjc *MockJobContext) Build(tracker job.Tracker) (job.Context, error) { + args := mjc.Called(tracker) + c := args.Get(0) + if c != nil { + return c.(job.Context), nil + } + + return nil, args.Error(1) +} + +// Get ... +func (mjc *MockJobContext) Get(prop string) (interface{}, bool) { + args := mjc.Called(prop) + return args.Get(0), args.Bool(1) +} + +// SystemContext ... +func (mjc *MockJobContext) SystemContext() context.Context { + return context.TODO() +} + +// Checkin ... +func (mjc *MockJobContext) Checkin(status string) error { + args := mjc.Called(status) + return args.Error(0) +} + +// OPCommand ... +func (mjc *MockJobContext) OPCommand() (job.OPCommand, bool) { + args := mjc.Called() + return (job.OPCommand)(args.String(0)), args.Bool(1) +} + +// GetLogger ... +func (mjc *MockJobContext) GetLogger() logger.Interface { + return &MockJobLogger{} +} + +// Tracker ... +func (mjc *MockJobContext) Tracker() job.Tracker { + args := mjc.Called() + if t := args.Get(0); t != nil { + return t.(job.Tracker) + } + + return nil +} + +// MockJobLogger mocks the job logger interface. +// TODO: Maybe moved to a separate `mock` pkg for sharing in future. +type MockJobLogger struct { + mock.Mock +} + +// Debug ... +func (mjl *MockJobLogger) Debug(v ...interface{}) { + logger.Debug(v...) +} + +// Debugf ... +func (mjl *MockJobLogger) Debugf(format string, v ...interface{}) { + logger.Debugf(format, v...) +} + +// Info ... +func (mjl *MockJobLogger) Info(v ...interface{}) { + logger.Info(v...) +} + +// Infof ... +func (mjl *MockJobLogger) Infof(format string, v ...interface{}) { + logger.Infof(format, v...) +} + +// Warning ... +func (mjl *MockJobLogger) Warning(v ...interface{}) { + logger.Warning(v...) +} + +// Warningf ... 
+func (mjl *MockJobLogger) Warningf(format string, v ...interface{}) { + logger.Warningf(format, v...) +} + +// Error ... +func (mjl *MockJobLogger) Error(v ...interface{}) { + logger.Error(v...) +} + +// Errorf ... +func (mjl *MockJobLogger) Errorf(format string, v ...interface{}) { + logger.Errorf(format, v...) +} + +// Fatal ... +func (mjl *MockJobLogger) Fatal(v ...interface{}) { + logger.Fatal(v...) +} + +// Fatalf ... +func (mjl *MockJobLogger) Fatalf(format string, v ...interface{}) { + logger.Fatalf(format, v...) +} + +// MockClientPool mocks the client pool +type MockClientPool struct { + mock.Mock +} + +// Get v1 client +func (mcp *MockClientPool) Get(r *scanner.Registration) (v1.Client, error) { + args := mcp.Called(r) + c := args.Get(0) + if c != nil { + return c.(v1.Client), nil + } + + return nil, args.Error(1) +} + +// MockClient mocks the v1 client +type MockClient struct { + mock.Mock +} + +// GetMetadata ... +func (mc *MockClient) GetMetadata() (*v1.ScannerAdapterMetadata, error) { + args := mc.Called() + s := args.Get(0) + if s != nil { + return s.(*v1.ScannerAdapterMetadata), nil + } + + return nil, args.Error(1) +} + +// SubmitScan ... +func (mc *MockClient) SubmitScan(req *v1.ScanRequest) (*v1.ScanResponse, error) { + args := mc.Called(req) + sr := args.Get(0) + if sr != nil { + return sr.(*v1.ScanResponse), nil + } + + return nil, args.Error(1) +} + +// GetScanReport ... +func (mc *MockClient) GetScanReport(scanRequestID, reportMIMEType string) (string, error) { + args := mc.Called(scanRequestID, reportMIMEType) + return args.String(0), args.Error(1) +} diff --git a/src/pkg/scan/report/base_manager.go b/src/pkg/scan/report/base_manager.go new file mode 100644 index 000000000..b4645eed7 --- /dev/null +++ b/src/pkg/scan/report/base_manager.go @@ -0,0 +1,194 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import ( + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/scan/dao/scan" + "github.com/google/uuid" + "github.com/pkg/errors" +) + +// basicManager is a default implementation of report manager. +type basicManager struct{} + +// NewManager news basic manager. +func NewManager() Manager { + return &basicManager{} +} + +// Create ... +func (bm *basicManager) Create(r *scan.Report) (string, error) { + // Validate report object + if r == nil { + return "", errors.New("nil scan report object") + } + + if len(r.Digest) == 0 || len(r.RegistrationUUID) == 0 || len(r.MimeType) == 0 { + return "", errors.New("malformed scan report object") + } + + // Check if there is existing report copy + // Limit only one scanning performed by a given provider on the specified artifact can be there + kws := make(map[string]interface{}, 3) + kws["digest"] = r.Digest + kws["registration_uuid"] = r.RegistrationUUID + kws["mime_type"] = []interface{}{r.MimeType} + + existingCopies, err := scan.ListReports(&q.Query{ + PageNumber: 1, + PageSize: 1, + Keywords: kws, + }) + + if err != nil { + return "", errors.Wrap(err, "create report: check existence of report") + } + + // Delete existing copy + if len(existingCopies) > 0 { + theCopy := existingCopies[0] + + theStatus := job.Status(theCopy.Status) + // Status is an error message + if theStatus.Code() == -1 && theCopy.StatusCode == job.ErrorStatus.Code() { + // Mark as regular error status + theStatus = job.ErrorStatus 
+ } + + // Status conflict + if theStatus.Compare(job.RunningStatus) <= 0 { + return "", errors.Errorf("conflict: a previous scanning is %s", theCopy.Status) + } + + // Otherwise it will be a completed report + // Clear it before insert this new one + if err := scan.DeleteReport(theCopy.UUID); err != nil { + return "", errors.Wrap(err, "create report: clear old scan report") + } + } + + // Assign uuid + UUID, err := uuid.NewUUID() + if err != nil { + return "", errors.Wrap(err, "create report: new UUID") + } + r.UUID = UUID.String() + + // Fill in / override the related properties + r.StartTime = time.Now().UTC() + r.Status = job.PendingStatus.String() + r.StatusCode = job.PendingStatus.Code() + + // Insert + if _, err = scan.CreateReport(r); err != nil { + return "", errors.Wrap(err, "create report: insert") + } + + return r.UUID, nil +} + +// Get ... +func (bm *basicManager) Get(uuid string) (*scan.Report, error) { + if len(uuid) == 0 { + return nil, errors.New("empty uuid to get scan report") + } + + kws := make(map[string]interface{}) + kws["uuid"] = uuid + + l, err := scan.ListReports(&q.Query{ + PageNumber: 1, + PageSize: 1, + Keywords: kws, + }) + + if err != nil { + return nil, errors.Wrap(err, "report manager: get") + } + + if len(l) == 0 { + // Not found + return nil, nil + } + + return l[0], nil +} + +// GetBy ... +func (bm *basicManager) GetBy(digest string, registrationUUID string, mimeTypes []string) ([]*scan.Report, error) { + if len(digest) == 0 { + return nil, errors.New("empty digest to get report data") + } + + kws := make(map[string]interface{}) + kws["digest"] = digest + if len(registrationUUID) > 0 { + kws["registration_uuid"] = registrationUUID + } + if len(mimeTypes) > 0 { + kws["mime_type"] = mimeTypes + } + // Query all + query := &q.Query{ + PageNumber: 0, + Keywords: kws, + } + + return scan.ListReports(query) +} + +// UpdateScanJobID ... 
+func (bm *basicManager) UpdateScanJobID(trackID string, jobID string) error { + if len(trackID) == 0 || len(jobID) == 0 { + return errors.New("bad arguments") + } + + return scan.UpdateJobID(trackID, jobID) +} + +// UpdateStatus ... +func (bm *basicManager) UpdateStatus(trackID string, status string, rev int64) error { + if len(trackID) == 0 { + return errors.New("missing uuid") + } + + stCode := job.ErrorStatus.Code() + st := job.Status(status) + // Check if it is job valid status. + // Probably an error happened before submitting jobs. + if st.Code() != -1 { + // Assign error code + stCode = st.Code() + } + + return scan.UpdateReportStatus(trackID, status, stCode, rev) +} + +// UpdateReportData ... +func (bm *basicManager) UpdateReportData(uuid string, report string, rev int64) error { + if len(uuid) == 0 { + return errors.New("missing uuid") + } + + if len(report) == 0 { + return errors.New("missing report JSON data") + } + + return scan.UpdateReportData(uuid, report, rev) +} diff --git a/src/pkg/scan/report/base_manager_test.go b/src/pkg/scan/report/base_manager_test.go new file mode 100644 index 000000000..17d5fef1d --- /dev/null +++ b/src/pkg/scan/report/base_manager_test.go @@ -0,0 +1,168 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package report + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/scan/dao/scan" + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// TestManagerSuite is a test suite for the report manager. +type TestManagerSuite struct { + suite.Suite + + m Manager + rpUUID string +} + +// TestManager is an entry of suite TestManagerSuite. +func TestManager(t *testing.T) { + suite.Run(t, &TestManagerSuite{}) +} + +// SetupSuite prepares test env for suite TestManagerSuite. +func (suite *TestManagerSuite) SetupSuite() { + dao.PrepareTestForPostgresSQL() + + suite.m = NewManager() +} + +// SetupTest prepares env for test cases. +func (suite *TestManagerSuite) SetupTest() { + rp := &scan.Report{ + Digest: "d1000", + RegistrationUUID: "ruuid", + MimeType: v1.MimeTypeNativeReport, + TrackID: "tid001", + } + + uuid, err := suite.m.Create(rp) + require.NoError(suite.T(), err) + require.NotEmpty(suite.T(), uuid) + suite.rpUUID = uuid +} + +// TearDownTest clears test env for test cases. +func (suite *TestManagerSuite) TearDownTest() { + // No delete method defined in manager as no requirement, + // so, to clear env, call dao method here + err := scan.DeleteReport(suite.rpUUID) + require.NoError(suite.T(), err) +} + +// TestManagerCreateWithExisting tests the case that a copy already is there when creating report. 
+func (suite *TestManagerSuite) TestManagerCreateWithExisting() { + err := suite.m.UpdateStatus("tid001", job.SuccessStatus.String(), 2000) + require.NoError(suite.T(), err) + + rp := &scan.Report{ + Digest: "d1000", + RegistrationUUID: "ruuid", + MimeType: v1.MimeTypeNativeReport, + TrackID: "tid002", + } + + uuid, err := suite.m.Create(rp) + require.NoError(suite.T(), err) + require.NotEmpty(suite.T(), uuid) + + assert.NotEqual(suite.T(), suite.rpUUID, uuid) + suite.rpUUID = uuid +} + +// TestManagerGet tests the get method. +func (suite *TestManagerSuite) TestManagerGet() { + sr, err := suite.m.Get(suite.rpUUID) + + require.NoError(suite.T(), err) + require.NotNil(suite.T(), sr) + + assert.Equal(suite.T(), "d1000", sr.Digest) +} + +// TestManagerGetBy tests the get by method. +func (suite *TestManagerSuite) TestManagerGetBy() { + l, err := suite.m.GetBy("d1000", "ruuid", []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + assert.Equal(suite.T(), suite.rpUUID, l[0].UUID) + + l, err = suite.m.GetBy("d1000", "ruuid", nil) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + assert.Equal(suite.T(), suite.rpUUID, l[0].UUID) + + l, err = suite.m.GetBy("d1000", "", nil) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + assert.Equal(suite.T(), suite.rpUUID, l[0].UUID) +} + +// TestManagerUpdateJobID tests update job ID method. 
+func (suite *TestManagerSuite) TestManagerUpdateJobID() { + l, err := suite.m.GetBy("d1000", "ruuid", []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + oldJID := l[0].JobID + + err = suite.m.UpdateScanJobID("tid001", "jID1001") + require.NoError(suite.T(), err) + + l, err = suite.m.GetBy("d1000", "ruuid", []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + assert.NotEqual(suite.T(), oldJID, l[0].JobID) + assert.Equal(suite.T(), "jID1001", l[0].JobID) +} + +// TestManagerUpdateStatus tests update status method +func (suite *TestManagerSuite) TestManagerUpdateStatus() { + l, err := suite.m.GetBy("d1000", "ruuid", []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + oldSt := l[0].Status + + err = suite.m.UpdateStatus("tid001", job.SuccessStatus.String(), 10000) + require.NoError(suite.T(), err) + + l, err = suite.m.GetBy("d1000", "ruuid", []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + assert.NotEqual(suite.T(), oldSt, l[0].Status) + assert.Equal(suite.T(), job.SuccessStatus.String(), l[0].Status) +} + +// TestManagerUpdateReportData tests update job report data. 
+func (suite *TestManagerSuite) TestManagerUpdateReportData() { + err := suite.m.UpdateReportData(suite.rpUUID, "{\"a\":1000}", 1000) + require.NoError(suite.T(), err) + + l, err := suite.m.GetBy("d1000", "ruuid", []string{v1.MimeTypeNativeReport}) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) + + assert.Equal(suite.T(), "{\"a\":1000}", l[0].Report) +} diff --git a/src/pkg/scan/report/manager.go b/src/pkg/scan/report/manager.go new file mode 100644 index 000000000..f4059abcc --- /dev/null +++ b/src/pkg/scan/report/manager.go @@ -0,0 +1,90 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import "github.com/goharbor/harbor/src/pkg/scan/dao/scan" + +// Manager is used to manage the scan reports. +type Manager interface { + // Create a new report record. + // + // Arguments: + // r *scan.Report : report model to be created + // + // Returns: + // string : uuid of the new report + // error : non nil error if any errors occurred + // + Create(r *scan.Report) (string, error) + + // Update the scan job ID of the given report. + // + // Arguments: + // trackID string : uuid to identify the report + // jobID string : scan job ID + // + // Returns: + // error : non nil error if any errors occurred + // + UpdateScanJobID(trackID string, jobID string) error + + // Update the status (mapping to the scan job status) of the given report. 
+ // + // Arguments: + // trackID string : uuid to identify the report + // status string : status info + // rev int64 : data revision info + // + // Returns: + // error : non nil error if any errors occurred + // + UpdateStatus(trackID string, status string, rev int64) error + + // Update the report data (with JSON format) of the given report. + // + // Arguments: + // uuid string : uuid to identify the report + // report string : report JSON data + // rev int64 : data revision info + // + // Returns: + // error : non nil error if any errors occurred + // + UpdateReportData(uuid string, report string, rev int64) error + + // Get the reports for the given digest by other properties. + // + // Arguments: + // digest string : digest of the artifact + // registrationUUID string : [optional] the report generated by which registration. + // If it is empty, reports by all the registrations are retrieved. + // mimeTypes []string : [optional] mime types of the reports requiring + // If empty array is specified, reports with all the supported mimes are retrieved. + // + // Returns: + // []*scan.Report : report list + // error : non nil error if any errors occurred + GetBy(digest string, registrationUUID string, mimeTypes []string) ([]*scan.Report, error) + + // Get the report for the given uuid. + // + // Arguments: + // uuid string : uuid of the scan report + // + // Returns: + // *scan.Report : scan report + // error : non nil error if any errors occurred + Get(uuid string) (*scan.Report, error) +} diff --git a/src/pkg/scan/report/summary.go b/src/pkg/scan/report/summary.go new file mode 100644 index 000000000..5c5e37d57 --- /dev/null +++ b/src/pkg/scan/report/summary.go @@ -0,0 +1,97 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package report
+
+import (
+	"reflect"
+
+	"github.com/goharbor/harbor/src/jobservice/job"
+	"github.com/goharbor/harbor/src/pkg/scan/dao/scan"
+	v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
+	"github.com/goharbor/harbor/src/pkg/scan/vuln"
+	"github.com/pkg/errors"
+)
+
+// SupportedGenerators declares mappings between mime type and summary generator func.
+var SupportedGenerators = map[string]SummaryGenerator{
+	v1.MimeTypeNativeReport: GenerateNativeSummary,
+}
+
+// GenerateSummary is a helper function to generate report
+// summary based on the given report.
+func GenerateSummary(r *scan.Report) (interface{}, error) {
+	g, ok := SupportedGenerators[r.MimeType]
+	if !ok {
+		return nil, errors.Errorf("no generator bound with mime type %s", r.MimeType)
+	}
+
+	return g(r)
+}
+
+// SummaryGenerator is a func template which used to generated report
+// summary for relevant mime type.
+type SummaryGenerator func(r *scan.Report) (interface{}, error)
+
+// GenerateNativeSummary generates the report summary for the native report.
+func GenerateNativeSummary(r *scan.Report) (interface{}, error) {
+	sum := &vuln.NativeReportSummary{}
+	sum.ReportID = r.UUID
+	sum.StartTime = r.StartTime
+	sum.EndTime = r.EndTime
+	sum.Duration = r.EndTime.Unix() - r.StartTime.Unix()
+
+	sum.ScanStatus = job.ErrorStatus.String()
+	if job.Status(r.Status).Code() != -1 {
+		sum.ScanStatus = r.Status
+	}
+
+	// If the status is not success/stopped, there will not be any report.
+ if r.Status != job.SuccessStatus.String() && + r.Status != job.StoppedStatus.String() { + return sum, nil + } + + // Probably no report data if the job is interrupted + if len(r.Report) == 0 { + return nil, errors.Errorf("no report data for %s, status is: %s", r.UUID, sum.ScanStatus) + } + + raw, err := ResolveData(r.MimeType, []byte(r.Report)) + if err != nil { + return nil, err + } + + rp, ok := raw.(*vuln.Report) + if !ok { + return nil, errors.Errorf("type mismatch: expect *vuln.Report but got %s", reflect.TypeOf(raw).String()) + } + + sum.Severity = rp.Severity + vsum := &vuln.VulnerabilitySummary{ + Total: len(rp.Vulnerabilities), + Summary: make(vuln.SeveritySummary), + } + + for _, v := range rp.Vulnerabilities { + if num, ok := vsum.Summary[v.Severity]; ok { + vsum.Summary[v.Severity] = num + 1 + } else { + vsum.Summary[v.Severity] = 1 + } + } + sum.Summary = vsum + + return sum, nil +} diff --git a/src/pkg/scan/report/supported_mime_test.go b/src/pkg/scan/report/supported_mime_test.go new file mode 100644 index 000000000..c4c167e0a --- /dev/null +++ b/src/pkg/scan/report/supported_mime_test.go @@ -0,0 +1,79 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package report + +import ( + "encoding/json" + "testing" + "time" + + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/goharbor/harbor/src/pkg/scan/vuln" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// SupportedMimesSuite is a suite to test SupportedMimes. +type SupportedMimesSuite struct { + suite.Suite + + mockData []byte +} + +// TestSupportedMimesSuite is the entry of SupportedMimesSuite. +func TestSupportedMimesSuite(t *testing.T) { + suite.Run(t, new(SupportedMimesSuite)) +} + +// SetupSuite prepares the test suite env. +func (suite *SupportedMimesSuite) SetupSuite() { + rp := vuln.Report{ + GeneratedAt: time.Now().UTC().String(), + Scanner: &v1.Scanner{ + Name: "Clair", + Vendor: "Harbor", + Version: "0.1.0", + }, + Severity: vuln.High, + Vulnerabilities: []*vuln.VulnerabilityItem{ + { + ID: "2019-0980-0909", + Package: "dpkg", + Version: "0.9.1", + FixVersion: "0.9.2", + Severity: vuln.High, + Description: "mock one", + Links: []string{"https://vuln.com"}, + }, + }, + } + + jsonData, err := json.Marshal(rp) + require.NoError(suite.T(), err) + suite.mockData = jsonData +} + +// TestResolveData tests the ResolveData. +func (suite *SupportedMimesSuite) TestResolveData() { + obj, err := ResolveData(v1.MimeTypeNativeReport, suite.mockData) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), obj) + require.Condition(suite.T(), func() (success bool) { + rp, ok := obj.(*vuln.Report) + success = ok && rp != nil && rp.Severity == vuln.High + + return + }) +} diff --git a/src/pkg/scan/report/supported_mimes.go b/src/pkg/scan/report/supported_mimes.go new file mode 100644 index 000000000..fc09aed65 --- /dev/null +++ b/src/pkg/scan/report/supported_mimes.go @@ -0,0 +1,58 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import ( + "encoding/json" + "reflect" + + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" + "github.com/goharbor/harbor/src/pkg/scan/vuln" + "github.com/pkg/errors" +) + +// SupportedMimes indicates what mime types are supported to render at UI end. +var SupportedMimes = map[string]interface{}{ + // The native report type + v1.MimeTypeNativeReport: (*vuln.Report)(nil), +} + +// ResolveData is a helper func to parse the JSON data with the given mime type. +func ResolveData(mime string, jsonData []byte) (interface{}, error) { + // If no resolver defined for the given mime types, directly ignore it. + // The raw data will be used. + t, ok := SupportedMimes[mime] + if !ok { + return nil, nil + } + + if len(jsonData) == 0 { + return nil, errors.New("empty JSON data") + } + + ty := reflect.TypeOf(t) + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + // New one + rp := reflect.New(ty).Elem().Addr().Interface() + + if err := json.Unmarshal(jsonData, rp); err != nil { + return nil, err + } + + return rp, nil +} diff --git a/src/pkg/scan/rest/auth/api_key_auth.go b/src/pkg/scan/rest/auth/api_key_auth.go new file mode 100644 index 000000000..80f79fd6d --- /dev/null +++ b/src/pkg/scan/rest/auth/api_key_auth.go @@ -0,0 +1,45 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "net/http" + + "github.com/pkg/errors" +) + +// apiKeyAuthorizer authorize by adding a header `X-ScannerAdapter-API-Key` with value "credential" +type apiKeyAuthorizer struct { + typeID string + accessCred string +} + +// Authorize the requests +func (aa *apiKeyAuthorizer) Authorize(req *http.Request) error { + if req != nil && len(aa.accessCred) > 0 { + req.Header.Add(aa.typeID, aa.accessCred) + return nil + } + + return errors.Errorf("%s: %s", aa.typeID, "missing data to authorize request") +} + +// NewAPIKeyAuthorizer news a apiKeyAuthorizer +func NewAPIKeyAuthorizer(accessCred string) Authorizer { + return &apiKeyAuthorizer{ + typeID: APIKey, + accessCred: accessCred, + } +} diff --git a/src/pkg/scan/rest/auth/auth.go b/src/pkg/scan/rest/auth/auth.go new file mode 100644 index 000000000..f4cf29b40 --- /dev/null +++ b/src/pkg/scan/rest/auth/auth.go @@ -0,0 +1,54 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "net/http" + "strings" + + "github.com/pkg/errors" +) + +const ( + authorization = "Authorization" + // Basic ... + Basic = "Basic" + // Bearer ... + Bearer = "Bearer" + // APIKey ... + APIKey = "X-ScannerAdapter-API-Key" +) + +// Authorizer defines operation for authorizing the requests +type Authorizer interface { + Authorize(req *http.Request) error +} + +// GetAuthorizer is a factory method for getting an authorizer based on the given auth type +func GetAuthorizer(auth, cred string) (Authorizer, error) { + switch strings.TrimSpace(auth) { + // No authorizer required + case "": + return NewNoAuth(), nil + case Basic: + return NewBasicAuth(cred), nil + case Bearer: + return NewBearerAuth(cred), nil + case APIKey: + return NewAPIKeyAuthorizer(cred), nil + default: + return nil, errors.Errorf("auth type %s is not supported", auth) + } +} diff --git a/src/pkg/scan/rest/auth/basic_auth.go b/src/pkg/scan/rest/auth/basic_auth.go new file mode 100644 index 000000000..25256c2cf --- /dev/null +++ b/src/pkg/scan/rest/auth/basic_auth.go @@ -0,0 +1,48 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package auth
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/http"
+
+	"github.com/pkg/errors"
+)
+
+// basicAuthorizer authorizes the request by adding `Authorization Basic base64(credential)` header
+type basicAuthorizer struct {
+	typeID     string
+	accessCred string
+}
+
+// Authorize requests
+func (ba *basicAuthorizer) Authorize(req *http.Request) error {
+	if len(ba.accessCred) == 0 {
+		return errors.Errorf("%s:%s", ba.typeID, "missing access credential")
+	}
+
+	if req != nil && len(ba.accessCred) > 0 {
+		data := base64.StdEncoding.EncodeToString([]byte(ba.accessCred))
+		req.Header.Add(authorization, fmt.Sprintf("%s %s", ba.typeID, data))
+		return nil
+	}
+	return errors.Errorf("%s: %s", ba.typeID, "missing data to authorize request")
+}
+
+// NewBasicAuth basic authorizer
+func NewBasicAuth(accessCred string) Authorizer {
+	return &basicAuthorizer{Basic, accessCred}
+}
diff --git a/src/pkg/scan/rest/auth/bearer_auth.go b/src/pkg/scan/rest/auth/bearer_auth.go
new file mode 100644
index 000000000..a21eb1117
--- /dev/null
+++ b/src/pkg/scan/rest/auth/bearer_auth.go
@@ -0,0 +1,42 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/pkg/errors"
+)
+
+// bearerAuthorizer authorizes the request by adding `Authorization Bearer credential` header
+type bearerAuthorizer struct {
+	typeID     string
+	accessCred string
+}
+
+// Authorize requests
+func (ba *bearerAuthorizer) Authorize(req *http.Request) error {
+	if req != nil && len(ba.accessCred) > 0 {
+		req.Header.Add(authorization, fmt.Sprintf("%s %s", ba.typeID, ba.accessCred))
+		return nil
+	}
+	return errors.Errorf("%s: %s", ba.typeID, "missing data to authorize request")
+}
+
+// NewBearerAuth create bearer authorizer
+func NewBearerAuth(accessCred string) Authorizer {
+	return &bearerAuthorizer{Bearer, accessCred}
+}
diff --git a/src/pkg/scan/rest/auth/no_auth.go b/src/pkg/scan/rest/auth/no_auth.go
new file mode 100644
index 000000000..c41375a4b
--- /dev/null
+++ b/src/pkg/scan/rest/auth/no_auth.go
@@ -0,0 +1,33 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package auth + +import ( + "net/http" +) + +// noAuth is created to handle the no authorization case which is acceptable +type noAuth struct{} + +// Authorize the incoming request +func (na *noAuth) Authorize(req *http.Request) error { + // Do nothing + return nil +} + +// NewNoAuth creates a noAuth authorizer +func NewNoAuth() Authorizer { + return &noAuth{} +} diff --git a/src/pkg/scan/rest/v1/client.go b/src/pkg/scan/rest/v1/client.go new file mode 100644 index 000000000..a24eba3fb --- /dev/null +++ b/src/pkg/scan/rest/v1/client.go @@ -0,0 +1,291 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "strconv" + "time" + + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/goharbor/harbor/src/pkg/scan/rest/auth" + "github.com/pkg/errors" +) + +const ( + // defaultRefreshInterval is the default interval with seconds of refreshing report + defaultRefreshInterval = 5 + // refreshAfterHeader provides the refresh interval value + refreshAfterHeader = "Refresh-After" +) + +// Client defines the methods to access the adapter services that +// implement the REST API specs +type Client interface { + // GetMetadata gets the metadata of the given scanner + // + // Returns: + // *ScannerAdapterMetadata : metadata of the given scanner + // error : non nil error if any errors occurred + GetMetadata() (*ScannerAdapterMetadata, error) + + // SubmitScan initiates a scanning of the given artifact. + // Returns `nil` if the request was accepted, a non `nil` error otherwise. + // + // Arguments: + // req *ScanRequest : request including the registry and artifact data + // + // Returns: + // *ScanResponse : response with UUID for tracking the scan results + // error : non nil error if any errors occurred + SubmitScan(req *ScanRequest) (*ScanResponse, error) + + // GetScanReport gets the scan result for the corresponding ScanRequest identifier. + // Note that this is a blocking method which either returns a non `nil` scan report or error. + // A caller is supposed to cast the returned interface{} to a structure that corresponds + // to the specified MIME type. 
+ // + // Arguments: + // scanRequestID string : the ID of the scan submitted before + // reportMIMEType string : the report mime type + // Returns: + // string : the scan report of the given artifact + // error : non nil error if any errors occurred + GetScanReport(scanRequestID, reportMIMEType string) (string, error) +} + +// basicClient is default implementation of the Client interface +type basicClient struct { + httpClient *http.Client + spec *Spec + authorizer auth.Authorizer +} + +// NewClient news a basic client +func NewClient(r *scanner.Registration) (Client, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: r.SkipCertVerify, + }, + } + + authorizer, err := auth.GetAuthorizer(r.Auth, r.AccessCredential) + if err != nil { + return nil, errors.Wrap(err, "new v1 client") + } + + return &basicClient{ + httpClient: &http.Client{ + Transport: transport, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + }, + spec: NewSpec(r.URL), + authorizer: authorizer, + }, nil +} + +// GetMetadata ... 
+func (c *basicClient) GetMetadata() (*ScannerAdapterMetadata, error) { + def := c.spec.Metadata() + + request, err := http.NewRequest(http.MethodGet, def.URL, nil) + if err != nil { + return nil, errors.Wrap(err, "v1 client: get metadata") + } + + // Resolve header + def.Resolver(request) + + // Send request + respData, err := c.send(request, generalResponseHandler(http.StatusOK)) + if err != nil { + return nil, errors.Wrap(err, "v1 client: get metadata") + } + + // Unmarshal data + meta := &ScannerAdapterMetadata{} + if err := json.Unmarshal(respData, meta); err != nil { + return nil, errors.Wrap(err, "v1 client: get metadata") + } + + return meta, nil +} + +// SubmitScan ... +func (c *basicClient) SubmitScan(req *ScanRequest) (*ScanResponse, error) { + if req == nil { + return nil, errors.New("nil request") + } + + data, err := json.Marshal(req) + if err != nil { + return nil, errors.Wrap(err, "v1 client: submit scan") + } + + def := c.spec.SubmitScan() + request, err := http.NewRequest(http.MethodPost, def.URL, bytes.NewReader(data)) + if err != nil { + return nil, errors.Wrap(err, "v1 client: submit scan") + } + + respData, err := c.send(request, generalResponseHandler(http.StatusAccepted)) + if err != nil { + return nil, errors.Wrap(err, "v1 client: submit scan") + } + + resp := &ScanResponse{} + if err := json.Unmarshal(respData, resp); err != nil { + return nil, errors.Wrap(err, "v1 client: submit scan") + } + + return resp, nil +} + +// GetScanReport ... 
+func (c *basicClient) GetScanReport(scanRequestID, reportMIMEType string) (string, error) { + if len(scanRequestID) == 0 { + return "", errors.New("empty scan request ID") + } + + if len(reportMIMEType) == 0 { + return "", errors.New("missing report mime type") + } + + def := c.spec.GetScanReport(scanRequestID, reportMIMEType) + + req, err := http.NewRequest(http.MethodGet, def.URL, nil) + if err != nil { + return "", errors.Wrap(err, "v1 client: get scan report") + } + + respData, err := c.send(req, reportResponseHandler()) + if err != nil { + // This error should not be wrapped + return "", err + } + + return string(respData), nil +} + +func (c *basicClient) send(req *http.Request, h responseHandler) ([]byte, error) { + if c.authorizer != nil { + if err := c.authorizer.Authorize(req); err != nil { + return nil, errors.Wrap(err, "send: authorization") + } + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + + defer func() { + if err := resp.Body.Close(); err != nil { + // Just logged + logger.Errorf("close response body error: %s", err) + } + }() + + return h(resp.StatusCode, resp) +} + +// responseHandlerFunc is a handler func template for handling the http response data, +// especially the error part. +type responseHandler func(code int, resp *http.Response) ([]byte, error) + +// generalResponseHandler create a general response handler to cover the common cases. +func generalResponseHandler(expectedCode int) responseHandler { + return func(code int, resp *http.Response) ([]byte, error) { + return generalRespHandlerFunc(expectedCode, code, resp) + } +} + +// reportResponseHandler creates response handler for get report special case. 
+func reportResponseHandler() responseHandler { + return func(code int, resp *http.Response) ([]byte, error) { + if code == http.StatusFound { + // Set default + retryAfter := defaultRefreshInterval // seconds + // Read `retry after` info from header + v := resp.Header.Get(refreshAfterHeader) + if len(v) > 0 { + if i, err := strconv.ParseInt(v, 10, 8); err == nil { + retryAfter = int(i) + } else { + // log error + logger.Errorf("Parse `%s` error: %s", refreshAfterHeader, err) + } + } + + return nil, &ReportNotReadyError{RetryAfter: retryAfter} + } + + return generalRespHandlerFunc(http.StatusOK, code, resp) + } +} + +// generalRespHandlerFunc is a handler to cover the general cases +func generalRespHandlerFunc(expectedCode, code int, resp *http.Response) ([]byte, error) { + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if code != expectedCode { + if len(buf) > 0 { + // Try to read error response + eResp := &ErrorResponse{ + Err: &Error{}, + } + + err := json.Unmarshal(buf, eResp) + if err != nil { + return nil, errors.Wrap(err, "general response handler") + } + + // Append more contexts + eResp.Err.Message = fmt.Sprintf( + "%s: general response handler: unexpected status code: %d, expected: %d", + eResp.Err.Message, + code, + expectedCode, + ) + + return nil, eResp + } + + return nil, errors.Errorf("general response handler: unexpected status code: %d, expected: %d", code, expectedCode) + } + + return buf, nil +} diff --git a/src/pkg/scan/rest/v1/client_pool.go b/src/pkg/scan/rest/v1/client_pool.go new file mode 100644 index 000000000..6e4588101 --- /dev/null +++ b/src/pkg/scan/rest/v1/client_pool.go @@ -0,0 +1,168 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"

	"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
	"github.com/pkg/errors"
)

const (
	// defaultDeadCheckInterval is how often the background reaper checks a
	// cached client instance for expiry.
	defaultDeadCheckInterval = 1 * time.Minute
	// defaultExpireTime is how long a cached client may live before being
	// evicted from the pool.
	defaultExpireTime = 5 * time.Minute
)

// DefaultClientPool is a default client pool built with a nil (all-default) config.
var DefaultClientPool = NewClientPool(nil)

// ClientPool defines operations for the client pool which provides v1 client cache.
type ClientPool interface {
	// Get a v1 client interface for the specified registration.
	//
	// Arguments:
	//   r *scanner.Registration : registration for client connecting to
	//
	// Returns:
	//   Client : v1 client
	//   error : non nil error if any errors occurred
	Get(r *scanner.Registration) (Client, error)
}

// PoolConfig provides configurations for the client pool.
type PoolConfig struct {
	// Interval for checking dead instance.
	DeadCheckInterval time.Duration
	// Expire time for the instance to be marked as dead.
	ExpireTime time.Duration
}

// poolItem appends a timestamp to the caching client instance.
// The timestamp records creation time and is compared against ExpireTime by
// the dead-check goroutine.
type poolItem struct {
	c         Client
	timestamp time.Time
}

// basicClientPool is the default implementation of the client pool interface,
// backed by a sync.Map keyed on the registration's identity fields.
type basicClientPool struct {
	pool   *sync.Map
	config *PoolConfig
}

// NewClientPool news a basic client pool.
+func NewClientPool(config *PoolConfig) ClientPool { + bcp := &basicClientPool{ + pool: &sync.Map{}, + config: config, + } + + // Set config + if bcp.config == nil { + bcp.config = &PoolConfig{} + } + + if bcp.config.DeadCheckInterval == 0 { + bcp.config.DeadCheckInterval = defaultDeadCheckInterval + } + + if bcp.config.ExpireTime == 0 { + bcp.config.ExpireTime = defaultExpireTime + } + + return bcp +} + +// Get client for the specified registration. +// So far, there will not be too many scanner registrations. An then +// no need to do client instance clear work. +// If one day, we have to clear unactivated client instances in the pool, +// add the following func after the first time initializing the client. +// pool item represents the client with a timestamp of last accessed. + +func (bcp *basicClientPool) Get(r *scanner.Registration) (Client, error) { + if r == nil { + return nil, errors.New("nil scanner registration") + } + + if err := r.Validate(false); err != nil { + return nil, errors.Wrap(err, "client pool: get") + } + + k := key(r) + + item, ok := bcp.pool.Load(k) + if !ok { + nc, err := NewClient(r) + if err != nil { + return nil, errors.Wrap(err, "client pool: get") + } + + // Cache it + npi := &poolItem{ + c: nc, + timestamp: time.Now().UTC(), + } + + bcp.pool.Store(k, npi) + item = npi + + // dead check + bcp.deadCheck(k, npi) + } + + return item.(*poolItem).c, nil +} + +func (bcp *basicClientPool) deadCheck(key string, item *poolItem) { + // Run in a separate goroutine + go func() { + // As we do not have a global context, let's watch the system signal to + // exit the goroutine correctly. 
+ sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGTERM, os.Kill) + + tk := time.NewTicker(bcp.config.DeadCheckInterval) + defer tk.Stop() + + for { + select { + case t := <-tk.C: + if item.timestamp.Add(bcp.config.ExpireTime).Before(t.UTC()) { + // Expired + bcp.pool.Delete(key) + return + } + case <-sig: + // Terminated by system + return + } + } + }() +} + +func key(r *scanner.Registration) string { + return fmt.Sprintf("%s:%s:%s:%v", + r.URL, + r.Auth, + r.AccessCredential, + r.SkipCertVerify, + ) +} diff --git a/src/pkg/scan/rest/v1/client_pool_test.go b/src/pkg/scan/rest/v1/client_pool_test.go new file mode 100644 index 000000000..9666f4067 --- /dev/null +++ b/src/pkg/scan/rest/v1/client_pool_test.go @@ -0,0 +1,82 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "fmt" + "testing" + "time" + + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/goharbor/harbor/src/pkg/scan/rest/auth" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// ClientPoolTestSuite is a test suite to test the client pool. +type ClientPoolTestSuite struct { + suite.Suite + + pool ClientPool +} + +// TestClientPool is the entry of ClientPoolTestSuite. +func TestClientPool(t *testing.T) { + suite.Run(t, &ClientPoolTestSuite{}) +} + +// SetupSuite sets up test suite env. 
+func (suite *ClientPoolTestSuite) SetupSuite() { + cfg := &PoolConfig{ + DeadCheckInterval: 100 * time.Millisecond, + ExpireTime: 300 * time.Millisecond, + } + suite.pool = NewClientPool(cfg) +} + +// TestClientPoolGet tests the get method of client pool. +func (suite *ClientPoolTestSuite) TestClientPoolGet() { + r := &scanner.Registration{ + ID: 1, + Name: "TestClientPoolGet", + UUID: "uuid", + URL: "http://a.b.c", + Auth: auth.Basic, + AccessCredential: "u:p", + SkipCertVerify: false, + } + + client1, err := suite.pool.Get(r) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), client1) + + p1 := fmt.Sprintf("%p", client1.(*basicClient)) + + client2, err := suite.pool.Get(r) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), client2) + + p2 := fmt.Sprintf("%p", client2.(*basicClient)) + assert.Equal(suite.T(), p1, p2) + + <-time.After(400 * time.Millisecond) + client3, err := suite.pool.Get(r) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), client3) + + p3 := fmt.Sprintf("%p", client3.(*basicClient)) + assert.NotEqual(suite.T(), p2, p3) +} diff --git a/src/pkg/scan/rest/v1/client_test.go b/src/pkg/scan/rest/v1/client_test.go new file mode 100644 index 000000000..969189356 --- /dev/null +++ b/src/pkg/scan/rest/v1/client_test.go @@ -0,0 +1,197 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// ClientTestSuite tests the v1 client +type ClientTestSuite struct { + suite.Suite + + testServer *httptest.Server + client Client +} + +// TestClient is the entry of ClientTestSuite +func TestClient(t *testing.T) { + suite.Run(t, new(ClientTestSuite)) +} + +// SetupSuite prepares the test suite env +func (suite *ClientTestSuite) SetupSuite() { + suite.testServer = httptest.NewServer(&mockHandler{}) + r := &scanner.Registration{ + ID: 1000, + UUID: "uuid", + Name: "TestClient", + URL: suite.testServer.URL, + SkipCertVerify: true, + } + + c, err := NewClient(r) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), c) + + suite.client = c +} + +// TestClientMetadata tests the metadata of the client +func (suite *ClientTestSuite) TestClientMetadata() { + m, err := suite.client.GetMetadata() + require.NoError(suite.T(), err) + require.NotNil(suite.T(), m) + + assert.Equal(suite.T(), m.Scanner.Name, "Clair") +} + +// TestClientSubmitScan tests the scan submission of client +func (suite *ClientTestSuite) TestClientSubmitScan() { + res, err := suite.client.SubmitScan(&ScanRequest{}) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), res) + + assert.Equal(suite.T(), res.ID, "123456789") +} + +// TestClientGetScanReportError tests getting report failed +func (suite *ClientTestSuite) TestClientGetScanReportError() { + _, err := suite.client.GetScanReport("id1", MimeTypeNativeReport) + require.Error(suite.T(), err) + assert.Condition(suite.T(), func() (success bool) { + success = strings.Index(err.Error(), "error") != -1 + return + }) +} + +// TestClientGetScanReport tests getting report +func (suite *ClientTestSuite) TestClientGetScanReport() { + res, err := 
suite.client.GetScanReport("id2", MimeTypeNativeReport) + require.NoError(suite.T(), err) + require.NotEmpty(suite.T(), res) +} + +// TestClientGetScanReportNotReady tests the case that the report is not ready +func (suite *ClientTestSuite) TestClientGetScanReportNotReady() { + _, err := suite.client.GetScanReport("id3", MimeTypeNativeReport) + require.Error(suite.T(), err) + require.Condition(suite.T(), func() (success bool) { + _, success = err.(*ReportNotReadyError) + return + }) + assert.Equal(suite.T(), 10, err.(*ReportNotReadyError).RetryAfter) +} + +// TearDownSuite clears the test suite env +func (suite *ClientTestSuite) TearDownSuite() { + suite.testServer.Close() +} + +type mockHandler struct{} + +// ServeHTTP ... +func (mh *mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch r.RequestURI { + case "/api/v1/metadata": + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusForbidden) + return + } + m := &ScannerAdapterMetadata{ + Scanner: &Scanner{ + Name: "Clair", + Vendor: "Harbor", + Version: "0.1.0", + }, + Capabilities: []*ScannerCapability{{ + ConsumesMimeTypes: []string{ + MimeTypeOCIArtifact, + MimeTypeDockerArtifact, + }, + ProducesMimeTypes: []string{ + MimeTypeNativeReport, + MimeTypeRawReport, + }, + }}, + Properties: ScannerProperties{ + "extra": "testing", + }, + } + data, _ := json.Marshal(m) + w.WriteHeader(http.StatusOK) + _, _ = w.Write(data) + break + case "/api/v1/scan": + if r.Method != http.MethodPost { + w.WriteHeader(http.StatusForbidden) + return + } + + res := &ScanResponse{} + res.ID = "123456789" + + data, _ := json.Marshal(res) + + w.WriteHeader(http.StatusAccepted) + _, _ = w.Write(data) + break + case "/api/v1/scan/id1/report": + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusForbidden) + return + } + + e := &ErrorResponse{ + &Error{ + Message: "error", + }, + } + + data, _ := json.Marshal(e) + + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write(data) + break + case 
"/api/v1/scan/id2/report": + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusForbidden) + return + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("{}")) + break + case "/api/v1/scan/id3/report": + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusForbidden) + return + } + + w.Header().Add(refreshAfterHeader, fmt.Sprintf("%d", 10)) + w.Header().Add("Location", "/scan/id3/report") + w.WriteHeader(http.StatusFound) + break + } +} diff --git a/src/pkg/scan/rest/v1/models.go b/src/pkg/scan/rest/v1/models.go new file mode 100644 index 000000000..cd817900d --- /dev/null +++ b/src/pkg/scan/rest/v1/models.go @@ -0,0 +1,178 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/pkg/errors" +) + +// Scanner represents metadata of a Scanner Adapter which allow Harbor to lookup a scanner capable of +// scanning a given Artifact stored in its registry and making sure that it can interpret a +// returned result. +type Scanner struct { + // The name of the scanner. + Name string `json:"name"` + // The name of the scanner's provider. + Vendor string `json:"vendor"` + // The version of the scanner. + Version string `json:"version"` +} + +// ScannerCapability consists of the set of recognized artifact MIME types and the set of scanner +// report MIME types. 
For example, a scanner capable of analyzing Docker images and producing +// a vulnerabilities report recognizable by Harbor web console might be represented with the +// following capability: +// - consumes MIME types: +// -- application/vnd.oci.image.manifest.v1+json +// -- application/vnd.docker.distribution.manifest.v2+json +// - produces MIME types +// -- application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0 +// -- application/vnd.scanner.adapter.vuln.report.raw +type ScannerCapability struct { + // The set of MIME types of the artifacts supported by the scanner to produce the reports + // specified in the "produces_mime_types". A given mime type should only be present in one + // capability item. + ConsumesMimeTypes []string `json:"consumes_mime_types"` + // The set of MIME types of reports generated by the scanner for the consumes_mime_types of + // the same capability record. + ProducesMimeTypes []string `json:"produces_mime_types"` +} + +// ScannerProperties is a set of custom properties that can further describe capabilities of a given scanner. +type ScannerProperties map[string]string + +// ScannerAdapterMetadata represents metadata of a Scanner Adapter which allows Harbor to lookup +// a scanner capable of scanning a given Artifact stored in its registry and making sure that it +// can interpret a returned result. +type ScannerAdapterMetadata struct { + Scanner *Scanner `json:"scanner"` + Capabilities []*ScannerCapability `json:"capabilities"` + Properties ScannerProperties `json:"properties"` +} + +// Artifact represents an artifact stored in Registry. +type Artifact struct { + // ID of the namespace (project). It will not be sent to scanner adapter. + NamespaceID int64 `json:"-"` + // The full name of a Harbor repository containing the artifact, including the namespace. + // For example, `library/oracle/nosql`. 
+ Repository string `json:"repository"` + // The info used to identify the version of the artifact, + // e.g: tag of image or version of the chart. + Tag string `json:"tag"` + // The artifact's digest, consisting of an algorithm and hex portion. + // For example, `sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b`, + // represents sha256 based digest. + Digest string `json:"digest"` + // The mime type of the scanned artifact + MimeType string `json:"mime_type"` +} + +// Registry represents Registry connection settings. +type Registry struct { + // A base URL of the Docker Registry v2 API exposed by Harbor. + URL string `json:"url"` + // An optional value of the HTTP Authorization header sent with each request to the Docker Registry v2 API. + // For example, `Bearer: JWTTOKENGOESHERE`. + Authorization string `json:"authorization"` +} + +// ScanRequest represents a structure that is sent to a Scanner Adapter to initiate artifact scanning. +// Conducts all the details required to pull the artifact from a Harbor registry. +type ScanRequest struct { + // Connection settings for the Docker Registry v2 API exposed by Harbor. + Registry *Registry `json:"registry"` + // Artifact to be scanned. 
+ Artifact *Artifact `json:"artifact"` +} + +// FromJSON parses ScanRequest from json data +func (s *ScanRequest) FromJSON(jsonData string) error { + if len(jsonData) == 0 { + return errors.New("empty json data to parse") + } + + return json.Unmarshal([]byte(jsonData), s) +} + +// ToJSON marshals ScanRequest to JSON data +func (s *ScanRequest) ToJSON() (string, error) { + data, err := json.Marshal(s) + if err != nil { + return "", err + } + + return string(data), nil +} + +// Validate ScanRequest +func (s *ScanRequest) Validate() error { + if s.Registry == nil || + len(s.Registry.URL) == 0 || + len(s.Registry.Authorization) == 0 { + return errors.New("scan request: invalid registry") + } + + if s.Artifact == nil || + len(s.Artifact.Digest) == 0 || + len(s.Artifact.Repository) == 0 || + len(s.Artifact.MimeType) == 0 { + return errors.New("scan request: invalid artifact") + } + + return nil +} + +// ScanResponse represents the response returned by the scanner adapter after scan request successfully +// submitted. +type ScanResponse struct { + // e.g: 3fa85f64-5717-4562-b3fc-2c963f66afa6 + ID string `json:"id"` +} + +// ErrorResponse contains error message when requests are not correctly handled. 
type ErrorResponse struct {
	// Error object
	Err *Error `json:"error"`
}

// Error message
type Error struct {
	// Message of the error
	Message string `json:"message"`
}

// Error implements the error interface for ErrorResponse.
func (er *ErrorResponse) Error() string {
	if er.Err == nil {
		return "nil error"
	}
	return er.Err.Message
}

// ReportNotReadyError is an error to indicate the scan report is not ready.
type ReportNotReadyError struct {
	// RetryAfter suggests how many seconds to wait before the next retry.
	RetryAfter int
}

// Error implements the error interface for ReportNotReadyError.
func (rnr *ReportNotReadyError) Error() string {
	return fmt.Sprintf("report is not ready yet, retry after %d", rnr.RetryAfter)
}
+ +package v1 + +import ( + "fmt" + "net/http" + "strings" +) + +const ( + // HTTPAcceptHeader represents the HTTP accept header + HTTPAcceptHeader = "Accept" + // HTTPContentType represents the HTTP content-type header + HTTPContentType = "Content-Type" + // MimeTypeOCIArtifact defines the mime type for OCI artifact + MimeTypeOCIArtifact = "application/vnd.oci.image.manifest.v1+json" + // MimeTypeDockerArtifact defines the mime type for docker artifact + MimeTypeDockerArtifact = "application/vnd.docker.distribution.manifest.v2+json" + // MimeTypeNativeReport defines the mime type for native report + MimeTypeNativeReport = "application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0" + // MimeTypeRawReport defines the mime type for raw report + MimeTypeRawReport = "application/vnd.scanner.adapter.vuln.report.raw" + // MimeTypeAdapterMeta defines the mime type for adapter metadata + MimeTypeAdapterMeta = "application/vnd.scanner.adapter.metadata+json; version=1.0" + // MimeTypeScanRequest defines the mime type for scan request + MimeTypeScanRequest = "application/vnd.scanner.adapter.scan.request+json; version=1.0" + // MimeTypeScanResponse defines the mime type for scan response + MimeTypeScanResponse = "application/vnd.scanner.adapter.scan.response+json; version=1.0" + + apiPrefix = "/api/v1" +) + +// RequestResolver is a function template to modify the API request, e.g: add headers +type RequestResolver func(req *http.Request) + +// Definition for API +type Definition struct { + // URL of the API + URL string + // Resolver fro the request + Resolver RequestResolver +} + +// Spec of the API +// Contains URL and possible headers. 
+type Spec struct { + baseRoute string +} + +// NewSpec news V1 spec +func NewSpec(base string) *Spec { + s := &Spec{} + + if len(base) > 0 { + if strings.HasSuffix(base, "/") { + s.baseRoute = base[:len(base)-1] + } else { + s.baseRoute = base + } + } + + s.baseRoute = fmt.Sprintf("%s%s", s.baseRoute, apiPrefix) + + return s +} + +// Metadata API +func (s *Spec) Metadata() Definition { + return Definition{ + URL: fmt.Sprintf("%s%s", s.baseRoute, "/metadata"), + Resolver: func(req *http.Request) { + req.Header.Add(HTTPAcceptHeader, MimeTypeAdapterMeta) + }, + } +} + +// SubmitScan API +func (s *Spec) SubmitScan() Definition { + return Definition{ + URL: fmt.Sprintf("%s%s", s.baseRoute, "/scan"), + Resolver: func(req *http.Request) { + req.Header.Add(HTTPContentType, MimeTypeScanRequest) + req.Header.Add(HTTPAcceptHeader, MimeTypeScanResponse) + }, + } +} + +// GetScanReport API +func (s *Spec) GetScanReport(scanReqID string, mimeType string) Definition { + path := fmt.Sprintf("/scan/%s/report", scanReqID) + + return Definition{ + URL: fmt.Sprintf("%s%s", s.baseRoute, path), + Resolver: func(req *http.Request) { + req.Header.Add(HTTPAcceptHeader, mimeType) + }, + } +} diff --git a/src/pkg/scan/scanner/manager.go b/src/pkg/scan/scanner/manager.go new file mode 100644 index 000000000..c6421fe51 --- /dev/null +++ b/src/pkg/scan/scanner/manager.go @@ -0,0 +1,131 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package scanner

import (
	"github.com/goharbor/harbor/src/pkg/q"
	"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
	"github.com/google/uuid"
	"github.com/pkg/errors"
)

// Manager defines the related scanner API endpoints
type Manager interface {
	// List returns a list of currently configured scanner registrations.
	// Query parameters are optional
	List(query *q.Query) ([]*scanner.Registration, error)

	// Create creates a new scanner registration with the given data.
	// Returns the scanner registration identifier.
	Create(registration *scanner.Registration) (string, error)

	// Get returns the details of the specified scanner registration.
	Get(registrationUUID string) (*scanner.Registration, error)

	// Update updates the specified scanner registration.
	Update(registration *scanner.Registration) error

	// Delete deletes the specified scanner registration.
	Delete(registrationUUID string) error

	// SetAsDefault marks the specified scanner registration as default.
	// The implementation is supposed to unset any registration previously set as default.
	SetAsDefault(registrationUUID string) error

	// GetDefault returns the default scanner registration or `nil` if there are no registrations configured.
	GetDefault() (*scanner.Registration, error)
}

// basicManager is the default implementation of Manager.
// It is stateless: all persistence is delegated to the scanner DAO package.
type basicManager struct{}

// New returns a basic manager backed by the scanner DAO.
func New() Manager {
	return &basicManager{}
}

// Create ...
+func (bm *basicManager) Create(registration *scanner.Registration) (string, error) { + if registration == nil { + return "", errors.New("nil registration to create") + } + + // Inject new UUID + uid, err := uuid.NewUUID() + if err != nil { + return "", errors.Wrap(err, "new UUID: create registration") + } + registration.UUID = uid.String() + + if err := registration.Validate(true); err != nil { + return "", errors.Wrap(err, "create registration") + } + + if _, err := scanner.AddRegistration(registration); err != nil { + return "", errors.Wrap(err, "dao: create registration") + } + + return uid.String(), nil +} + +// Get ... +func (bm *basicManager) Get(registrationUUID string) (*scanner.Registration, error) { + if len(registrationUUID) == 0 { + return nil, errors.New("empty uuid of registration") + } + + return scanner.GetRegistration(registrationUUID) +} + +// Update ... +func (bm *basicManager) Update(registration *scanner.Registration) error { + if registration == nil { + return errors.New("nil registration to update") + } + + if err := registration.Validate(true); err != nil { + return errors.Wrap(err, "update registration") + } + + return scanner.UpdateRegistration(registration) +} + +// Delete ... +func (bm *basicManager) Delete(registrationUUID string) error { + if len(registrationUUID) == 0 { + return errors.New("empty UUID to delete") + } + + return scanner.DeleteRegistration(registrationUUID) +} + +// List ... +func (bm *basicManager) List(query *q.Query) ([]*scanner.Registration, error) { + return scanner.ListRegistrations(query) +} + +// SetAsDefault ... +func (bm *basicManager) SetAsDefault(registrationUUID string) error { + if len(registrationUUID) == 0 { + return errors.New("empty UUID to set default") + } + + return scanner.SetDefaultRegistration(registrationUUID) +} + +// GetDefault ... 
+func (bm *basicManager) GetDefault() (*scanner.Registration, error) { + return scanner.GetDefaultRegistration() +} diff --git a/src/pkg/scan/scanner/manager_test.go b/src/pkg/scan/scanner/manager_test.go new file mode 100644 index 000000000..6f8a485d2 --- /dev/null +++ b/src/pkg/scan/scanner/manager_test.go @@ -0,0 +1,112 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scanner + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/q" + "github.com/goharbor/harbor/src/pkg/scan/dao/scanner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// BasicManagerTestSuite tests the basic manager +type BasicManagerTestSuite struct { + suite.Suite + + mgr Manager + sampleUUID string +} + +// TestBasicManager is the entry of BasicManagerTestSuite +func TestBasicManager(t *testing.T) { + suite.Run(t, new(BasicManagerTestSuite)) +} + +// SetupSuite prepares env for test suite +func (suite *BasicManagerTestSuite) SetupSuite() { + dao.PrepareTestForPostgresSQL() + + suite.mgr = New() + + r := &scanner.Registration{ + Name: "forUT", + Description: "sample registration", + URL: "https://sample.scanner.com", + } + + uid, err := suite.mgr.Create(r) + require.NoError(suite.T(), err) + suite.sampleUUID = uid +} + +// TearDownSuite clears env for test suite +func (suite *BasicManagerTestSuite) 
TearDownSuite() { + err := suite.mgr.Delete(suite.sampleUUID) + require.NoError(suite.T(), err, "delete registration") +} + +// TestList tests list registrations +func (suite *BasicManagerTestSuite) TestList() { + m := make(map[string]interface{}, 1) + m["name"] = "forUT" + + l, err := suite.mgr.List(&q.Query{ + PageNumber: 1, + PageSize: 10, + Keywords: m, + }) + + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(l)) +} + +// TestGet tests get registration +func (suite *BasicManagerTestSuite) TestGet() { + r, err := suite.mgr.Get(suite.sampleUUID) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + assert.Equal(suite.T(), "forUT", r.Name) +} + +// TestUpdate tests update registration +func (suite *BasicManagerTestSuite) TestUpdate() { + r, err := suite.mgr.Get(suite.sampleUUID) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + + r.URL = "https://updated.com" + err = suite.mgr.Update(r) + require.NoError(suite.T(), err) + + r, err = suite.mgr.Get(suite.sampleUUID) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), r) + assert.Equal(suite.T(), "https://updated.com", r.URL) +} + +// TestDefault tests get/set default registration +func (suite *BasicManagerTestSuite) TestDefault() { + err := suite.mgr.SetAsDefault(suite.sampleUUID) + require.NoError(suite.T(), err) + + dr, err := suite.mgr.GetDefault() + require.NoError(suite.T(), err) + require.NotNil(suite.T(), dr) + assert.Equal(suite.T(), true, dr.IsDefault) +} diff --git a/src/pkg/scan/vuln/report.go b/src/pkg/scan/vuln/report.go new file mode 100644 index 000000000..57bbaf7d6 --- /dev/null +++ b/src/pkg/scan/vuln/report.go @@ -0,0 +1,58 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vuln + +import ( + v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1" +) + +// Report model for vulnerability scan +type Report struct { + // Time of generating this report + GeneratedAt string `json:"generated_at"` + // Scanner of generating this report + Scanner *v1.Scanner `json:"scanner"` + // A standard scale for measuring the severity of a vulnerability. + Severity Severity `json:"severity"` + // Vulnerability list + Vulnerabilities []*VulnerabilityItem `json:"vulnerabilities"` +} + +// VulnerabilityItem represents one found vulnerability +type VulnerabilityItem struct { + // The unique identifier of the vulnerability. + // e.g: CVE-2017-8283 + ID string `json:"id"` + // An operating system or software dependency package containing the vulnerability. + // e.g: dpkg + Package string `json:"package"` + // The version of the package containing the vulnerability. + // e.g: 1.17.27 + Version string `json:"version"` + // The version of the package containing the fix if available. + // e.g: 1.18.0 + FixVersion string `json:"fix_version"` + // A standard scale for measuring the severity of a vulnerability. + Severity Severity `json:"severity"` + // example: dpkg-source in dpkg 1.3.0 through 1.18.23 is able to use a non-GNU patch program + // and does not offer a protection mechanism for blank-indented diff hunks, which allows remote + // attackers to conduct directory traversal attacks via a crafted Debian source package, as + // demonstrated by using of dpkg-source on NetBSD. 
+ Description string `json:"description"` + // The list of link to the upstream database with the full description of the vulnerability. + // Format: URI + // e.g: List [ "https://security-tracker.debian.org/tracker/CVE-2017-8283" ] + Links []string `json:"links"` +} diff --git a/src/pkg/scan/vuln/severity.go b/src/pkg/scan/vuln/severity.go new file mode 100644 index 000000000..3d1df84f5 --- /dev/null +++ b/src/pkg/scan/vuln/severity.go @@ -0,0 +1,39 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vuln + +const ( + // Unknown - either a security problem that has not been assigned to a priority yet or + // a priority that the scanner did not recognize. + Unknown Severity = "Unknown" + // Low - a security problem, but is hard to exploit due to environment, requires a + // user-assisted attack, a small install base, or does very little damage. + Low Severity = "Low" + // Negligible - technically a security problem, but is only theoretical in nature, requires + // a very special situation, has almost no install base, or does no real damage. + Negligible Severity = "Negligible" + // Medium - a real security problem, and is exploitable for many people. Includes network + // daemon denial of service attacks, cross-site scripting, and gaining user privileges. + Medium Severity = "Medium" + // High - a real problem, exploitable for many people in a default installation. 
Includes + // serious remote denial of service, local root privilege escalations, or data loss. + High Severity = "High" + // Critical - a world-burning problem, exploitable for nearly all people in a default installation. + // Includes remote root privilege escalations, or massive data loss. + Critical Severity = "Critical" +) + +// Severity is a standard scale for measuring the severity of a vulnerability. +type Severity string diff --git a/src/pkg/scan/vuln/summary.go b/src/pkg/scan/vuln/summary.go new file mode 100644 index 000000000..27c596f73 --- /dev/null +++ b/src/pkg/scan/vuln/summary.go @@ -0,0 +1,41 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vuln + +import ( + "time" +) + +// NativeReportSummary is the default supported scan report summary model. +// Generated based on the report with v1.MimeTypeNativeReport mime type. +type NativeReportSummary struct { + ReportID string `json:"report_id"` + ScanStatus string `json:"scan_status"` + Severity Severity `json:"severity"` + Duration int64 `json:"duration"` + Summary *VulnerabilitySummary `json:"summary"` + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` +} + +// VulnerabilitySummary contains the total number of the found vulnerabilities number +// and numbers of each severity level. +type VulnerabilitySummary struct { + Total int `json:"total"` + Summary SeveritySummary `json:"summary"` +} + +// SeveritySummary ... 
+type SeveritySummary map[Severity]int diff --git a/src/portal/angular.json b/src/portal/angular.json index 157657038..51dd91369 100644 --- a/src/portal/angular.json +++ b/src/portal/angular.json @@ -26,6 +26,7 @@ "node_modules/@clr/ui/clr-ui.min.css", "node_modules/swagger-ui/dist/swagger-ui.css", "node_modules/prismjs/themes/prism-solarizedlight.css", + "src/global.scss", "src/styles.css" ], "scripts": [ @@ -37,9 +38,7 @@ "node_modules/marked/lib/marked.js", "node_modules/prismjs/prism.js", "node_modules/prismjs/components/prism-yaml.min.js", - "node_modules/jquery/dist/jquery.slim.js", - "node_modules/popper.js/dist/umd/popper.js", - "node_modules/bootstrap/dist/js/bootstrap.js" + "node_modules/popper.js/dist/umd/popper.js" ] }, "configurations": { @@ -170,7 +169,29 @@ "main": "lib/src/test.ts", "tsConfig": "lib/tsconfig.lib.json", "karmaConfig": "lib/karma.conf.js" - } + }, + "scripts": [ + "node_modules/core-js/client/shim.min.js", + "node_modules/mutationobserver-shim/dist/mutationobserver.min.js", + "node_modules/@webcomponents/custom-elements/custom-elements.min.js", + "node_modules/@clr/icons/clr-icons.min.js", + "node_modules/web-animations-js/web-animations.min.js", + "node_modules/marked/lib/marked.js", + "node_modules/prismjs/prism.js", + "node_modules/prismjs/components/prism-yaml.min.js" + ], + "styles": [ + "node_modules/@clr/icons/clr-icons.min.css", + "node_modules/@clr/ui/clr-ui.min.css", + "node_modules/prismjs/themes/prism-solarizedlight.css", + "src/styles.css" + ], + "assets": [ + "src/images", + "src/favicon.ico", + "src/setting.json", + "src/i18n" + ] }, "lint": { "builder": "@angular-devkit/build-angular:tslint", diff --git a/src/portal/karma.conf.js b/src/portal/karma.conf.js index ff2fed30f..56f21893b 100644 --- a/src/portal/karma.conf.js +++ b/src/portal/karma.conf.js @@ -1,47 +1,87 @@ // Karma configuration file, see link for more information -// https://karma-runner.github.io/0.13/config/configuration-file.html +// 
https://karma-runner.github.io/1.0/config/configuration-file.html +const path = require('path'); module.exports = function (config) { config.set({ - basePath: '/', - frameworks: ['jasmine', '@angular-devkit/build-angular'], - plugins: [ - require('karma-jasmine'), - require('karma-chrome-launcher'), - require('karma-mocha-reporter'), - require('karma-remap-istanbul'), - require('@angular-devkit/build-angular/plugins/karma') - ], - files: [ - {pattern: './src/test.ts', watched: false} - ], - preprocessors: { - + basePath: '', + frameworks: ['jasmine', '@angular-devkit/build-angular'], + plugins: [ + require('karma-jasmine'), + require('karma-chrome-launcher'), + require('karma-mocha-reporter'), + require('karma-coverage-istanbul-reporter'), + require('@angular-devkit/build-angular/plugins/karma') + ], + client: { + clearContext: false // leave Jasmine Spec Runner output visible in browser + }, + coverageIstanbulReporter: { + // reports can be any that are listed here: https://github.com/istanbuljs/istanbuljs/tree/aae256fb8b9a3d19414dcf069c592e88712c32c6/packages/istanbul-reports/lib + reports: ['html', 'lcovonly', 'text-summary'], + + // base output directory. If you include %browser% in the path it will be replaced with the karma browser name + dir: path.join(__dirname, 'coverage'), + + // Combines coverage information from multiple browsers into one report rather than outputting a report + // for each browser. + combineBrowserReports: true, + + // if using webpack and pre-loaders, work around webpack breaking the source path + fixWebpackSourcePaths: true, + + // Omit files with no statements, no functions and no branches from the report + skipFilesWithNoCoverage: false, + + // Most reporters accept additional config options. 
You can pass these through the `report-config` option + 'report-config': { + // all options available at: https://github.com/istanbuljs/istanbuljs/blob/aae256fb8b9a3d19414dcf069c592e88712c32c6/packages/istanbul-reports/lib/html/index.js#L135-L137 + html: { + // outputs the report in ./coverage/html + subdir: 'html' + } }, - mime: { - 'text/x-typescript': ['ts', 'tsx'] - }, - remapIstanbulReporter: { - dir: require('path').join(__dirname, 'coverage'), reports: { - html: 'coverage', - lcovonly: './coverage/coverage.lcov' - } - }, - - reporters: config.angularCli && config.angularCli.codeCoverage - ? ['mocha', 'karma-remap-istanbul'] - : ['mocha'], - port: 9876, - colors: true, - logLevel: config.LOG_INFO, - autoWatch: true, - browsers: ['ChromeHeadlessNoSandbox'], - customLaunchers: { - ChromeHeadlessNoSandbox: { - base: 'ChromeHeadless', - flags: ['--no-sandbox'] - } + + // enforce percentage thresholds + // anything under these percentages will cause karma to fail with an exit code of 1 if not running in watch mode + thresholds: { + emitWarning: true, // set to `true` to not fail the test command when thresholds are not met + // thresholds for all files + global: { + statements: 40, + branches: 13, + functions: 26, + lines: 41 }, - singleRun: true + // thresholds per file + each: { + statements: 0, + lines: 0, + branches: 0, + functions: 0 + } + } + + }, + reporters: ['progress', 'mocha','coverage-istanbul'], + mochaReporter: { + output: 'minimal' + }, + reportSlowerThan: 100, + port: 9876, + colors: true, + logLevel: config.LOG_INFO, + autoWatch: true, + singleRun: true, + browsers: ['ChromeHeadlessNoSandbox'], + browserDisconnectTolerance: 2, + browserNoActivityTimeout: 50000, + customLaunchers: { + ChromeHeadlessNoSandbox: { + base: 'ChromeHeadless', + flags: ['--no-sandbox'] + } + }, + restartOnFileChange: true }); -}; \ No newline at end of file + }; \ No newline at end of file diff --git a/src/portal/lib/karma.conf.js b/src/portal/lib/karma.conf.js new file 
mode 100644 index 000000000..00f8ceff3 --- /dev/null +++ b/src/portal/lib/karma.conf.js @@ -0,0 +1,87 @@ +// Karma configuration file, see link for more information +// https://karma-runner.github.io/1.0/config/configuration-file.html + +const path = require('path'); +module.exports = function (config) { + config.set({ + basePath: '', + frameworks: ['jasmine', '@angular-devkit/build-angular'], + plugins: [ + require('karma-jasmine'), + require('karma-chrome-launcher'), + require('karma-mocha-reporter'), + require('karma-coverage-istanbul-reporter'), + require('@angular-devkit/build-angular/plugins/karma') + ], + client: { + clearContext: false // leave Jasmine Spec Runner output visible in browser + }, + coverageIstanbulReporter: { + // reports can be any that are listed here: https://github.com/istanbuljs/istanbuljs/tree/aae256fb8b9a3d19414dcf069c592e88712c32c6/packages/istanbul-reports/lib + reports: ['html', 'lcovonly', 'text-summary'], + + // base output directory. If you include %browser% in the path it will be replaced with the karma browser name + dir: path.join(__dirname, 'coverage'), + + // Combines coverage information from multiple browsers into one report rather than outputting a report + // for each browser. + combineBrowserReports: true, + + // if using webpack and pre-loaders, work around webpack breaking the source path + fixWebpackSourcePaths: true, + + // Omit files with no statements, no functions and no branches from the report + skipFilesWithNoCoverage: false, + + // Most reporters accept additional config options. 
You can pass these through the `report-config` option + 'report-config': { + // all options available at: https://github.com/istanbuljs/istanbuljs/blob/aae256fb8b9a3d19414dcf069c592e88712c32c6/packages/istanbul-reports/lib/html/index.js#L135-L137 + html: { + // outputs the report in ./coverage/html + subdir: 'html' + } + }, + + // enforce percentage thresholds + // anything under these percentages will cause karma to fail with an exit code of 1 if not running in watch mode + thresholds: { + emitWarning: true, // set to `true` to not fail the test command when thresholds are not met + // thresholds for all files + global: { + statements: 37, + branches: 19, + functions: 28, + lines: 36 + }, + // thresholds per file + each: { + statements: 0, + lines: 0, + branches: 0, + functions: 0 + } + } + + }, + reporters: ['progress', 'mocha','coverage-istanbul'], + mochaReporter: { + output: 'minimal' + }, + reportSlowerThan: 100, + port: 9876, + colors: true, + logLevel: config.LOG_INFO, + autoWatch: true, + singleRun: true, + browsers: ['ChromeHeadlessNoSandbox'], + browserDisconnectTolerance: 2, + browserNoActivityTimeout: 50000, + customLaunchers: { + ChromeHeadlessNoSandbox: { + base: 'ChromeHeadless', + flags: ['--no-sandbox'] + } + }, + restartOnFileChange: true + }); + }; \ No newline at end of file diff --git a/src/portal/lib/ng-package.json b/src/portal/lib/ng-package.json index 89852ea86..921319790 100644 --- a/src/portal/lib/ng-package.json +++ b/src/portal/lib/ng-package.json @@ -4,11 +4,6 @@ "deleteDestPath": false, "lib": { "entryFile": "index.ts", - "externals": { - "@ngx-translate/core": "ngx-translate-core", - "@ngx-translate/core/index": "ngx-translate-core", - "ngx-markdown": "ngx-markdown" - }, "umdModuleIds": { "@clr/angular" : "angular", "ngx-markdown" : "ngxMarkdown", diff --git a/src/portal/lib/ng-package.prod.json b/src/portal/lib/ng-package.prod.json index 85a87a50d..4bf1dc101 100644 --- a/src/portal/lib/ng-package.prod.json +++ 
b/src/portal/lib/ng-package.prod.json @@ -3,11 +3,6 @@ "dest": "./dist", "lib": { "entryFile": "index.ts", - "externals": { - "@ngx-translate/core": "ngx-translate-core", - "@ngx-translate/core/index": "ngx-translate-core", - "ngx-markdown": "ngx-markdown" - }, "umdModuleIds": { "@clr/angular" : "angular", "ngx-markdown" : "ngxMarkdown", diff --git a/src/portal/lib/package-lock.json b/src/portal/lib/package-lock.json new file mode 100644 index 000000000..e4655fb88 --- /dev/null +++ b/src/portal/lib/package-lock.json @@ -0,0 +1,5 @@ +{ + "name": "@harbor/ui", + "version": "1.10.0", + "lockfileVersion": 1 +} diff --git a/src/portal/lib/package.json b/src/portal/lib/package.json index 9c49c4207..db2c23758 100644 --- a/src/portal/lib/package.json +++ b/src/portal/lib/package.json @@ -1,7 +1,7 @@ { "name": "@harbor/ui", - "version": "1.9.0", - "description": "Harbor shared UI components based on Clarity and Angular7", + "version": "1.10.0", + "description": "Harbor shared UI components based on Clarity and Angular8", "author": "CNCF", "module": "index.js", "main": "bundles/harborui.umd.min.js", @@ -19,26 +19,26 @@ }, "homepage": "https://github.com/vmware/harbor#readme", "peerDependencies": { - "@angular/animations": "^7.1.3", - "@angular/common": "^7.1.3", - "@angular/compiler": "^7.1.3", - "@angular/core": "^7.1.3", - "@angular/forms": "^7.1.3", - "@angular/http": "^7.1.3", - "@angular/platform-browser": "^7.1.3", - "@angular/platform-browser-dynamic": "^7.1.3", - "@angular/router": "^7.1.3", + "@angular/animations": "^8.2.0", + "@angular/common": "^8.2.0", + "@angular/compiler": "^8.2.0", + "@angular/core": "^8.2.0", + "@angular/forms": "^8.2.0", + "@angular/http": "^8.2.0", + "@angular/platform-browser": "^8.2.0", + "@angular/platform-browser-dynamic": "^8.2.0", + "@angular/router": "^8.2.0", "@ngx-translate/core": "^10.0.2", "@ngx-translate/http-loader": "^3.0.1", "@webcomponents/custom-elements": "^1.1.3", - "@clr/angular": "^1.0.0", - "@clr/ui": "^1.0.0", - 
"@clr/icons": "^1.0.0", + "@clr/angular": "^2.1.0", + "@clr/icons": "^2.1.0", + "@clr/ui": "^2.1.0", "core-js": "^2.5.4", "intl": "^1.2.5", "mutationobserver-shim": "^0.3.2", "ngx-cookie": "^1.0.0", - "ngx-markdown": "^6.2.0", + "ngx-markdown": "^8.1.0", "rxjs": "^6.3.3", "ts-helpers": "^1.1.1", "web-animations-js": "^2.2.1", diff --git a/src/portal/lib/src/config/config.ts b/src/portal/lib/src/config/config.ts index 9505b11c5..5d4893fcc 100644 --- a/src/portal/lib/src/config/config.ts +++ b/src/portal/lib/src/config/config.ts @@ -100,6 +100,8 @@ export class Configuration { oidc_scope?: StringValueItem; count_per_project: NumberValueItem; storage_per_project: NumberValueItem; + cfg_expiration: NumberValueItem; + oidc_groups_claim: StringValueItem; public constructor() { this.auth_mode = new StringValueItem("db_auth", true); this.project_creation_restriction = new StringValueItem("everyone", true); @@ -152,6 +154,7 @@ export class Configuration { this.oidc_client_secret = new StringValueItem('', true); this.oidc_verify_cert = new BoolValueItem(false, true); this.oidc_scope = new StringValueItem('', true); + this.oidc_groups_claim = new StringValueItem('', true); this.count_per_project = new NumberValueItem(-1, true); this.storage_per_project = new NumberValueItem(-1, true); } diff --git a/src/portal/lib/src/config/gc/gc-history/gc-history.component.spec.ts b/src/portal/lib/src/config/gc/gc-history/gc-history.component.spec.ts new file mode 100644 index 000000000..4e6ef5a00 --- /dev/null +++ b/src/portal/lib/src/config/gc/gc-history/gc-history.component.spec.ts @@ -0,0 +1,49 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { SharedModule } from '../../../shared/shared.module'; +import { GcRepoService } from "../gc.service"; +import { of } from 'rxjs'; +import { GcViewModelFactory } from "../gc.viewmodel.factory"; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ErrorHandler } from 
'../../../error-handler'; +import { GcHistoryComponent } from './gc-history.component'; + +describe('GcHistoryComponent', () => { + let component: GcHistoryComponent; + let fixture: ComponentFixture; + let fakeGcRepoService = { + getJobs: function () { + return of([]); + } + }; + let fakeGcViewModelFactory = { + createJobViewModel: function (data) { + return data; + } + }; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [GcHistoryComponent], + imports: [ + SharedModule, + TranslateModule.forRoot() + ], + providers: [ + ErrorHandler, + TranslateService, + { provide: GcRepoService, useValue: fakeGcRepoService }, + { provide: GcViewModelFactory, useValue: fakeGcViewModelFactory } + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(GcHistoryComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/lib/src/config/gc/gc.component.spec.ts b/src/portal/lib/src/config/gc/gc.component.spec.ts index c9b442682..1f4941b8c 100644 --- a/src/portal/lib/src/config/gc/gc.component.spec.ts +++ b/src/portal/lib/src/config/gc/gc.component.spec.ts @@ -9,6 +9,7 @@ import { GcViewModelFactory } from './gc.viewmodel.factory'; import { CronScheduleComponent } from '../../cron-schedule/cron-schedule.component'; import { CronTooltipComponent } from "../../cron-schedule/cron-tooltip/cron-tooltip.component"; import { of } from 'rxjs'; +import { GcJobData } from './gcLog'; describe('GcComponent', () => { let component: GcComponent; @@ -18,13 +19,17 @@ describe('GcComponent', () => { systemInfoEndpoint: "/api/system/gc" }; let mockSchedule = []; - let mockJobs = [ + let mockJobs: GcJobData[] = [ { id: 22222, schedule: null, job_status: 'string', - creation_time: new Date(), - update_time: new Date(), + creation_time: new Date().toDateString(), + update_time: new Date().toDateString(), + 
job_name: 'string', + job_kind: 'string', + job_uuid: 'string', + delete: false } ]; let spySchedule: jasmine.Spy; diff --git a/src/portal/lib/src/config/gc/gc.component.ts b/src/portal/lib/src/config/gc/gc.component.ts index 44d805d92..ccd4196aa 100644 --- a/src/portal/lib/src/config/gc/gc.component.ts +++ b/src/portal/lib/src/config/gc/gc.component.ts @@ -32,7 +32,7 @@ export class GcComponent implements OnInit { getText = 'CONFIG.GC'; getLabelCurrent = 'GC.CURRENT_SCHEDULE'; @Output() loadingGcStatus = new EventEmitter(); - @ViewChild(CronScheduleComponent) + @ViewChild(CronScheduleComponent, {static: false}) CronScheduleComponent: CronScheduleComponent; constructor( private gcRepoService: GcRepoService, diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html index c9bd48440..89ab0c585 100644 --- a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html +++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html @@ -4,79 +4,73 @@ -
-
- -
- -
-
- -
+ + + + + + + + + + + + {{ 'DESTINATION.NAME_IS_REQUIRED' | translate }} + + + + + + + + +
+ +
+
+ +
-
- -
- - -
- -
- - -
- -
- - +
+ + {{ 'DESTINATION.URL_IS_REQUIRED' | translate }} +
- -
- - +
+ + + + + + +
+ +
+
+ + + + +
- -
- - - - - -
- -
- - +
+ + +
-
- - -
-
+ + + + + +
+ + +
\ No newline at end of file diff --git a/src/portal/lib/src/create-edit-endpoint/create-edit-endpoint.component.ts b/src/portal/lib/src/create-edit-endpoint/create-edit-endpoint.component.ts index 28d7624e4..b02b6feef 100644 --- a/src/portal/lib/src/create-edit-endpoint/create-edit-endpoint.component.ts +++ b/src/portal/lib/src/create-edit-endpoint/create-edit-endpoint.component.ts @@ -63,13 +63,13 @@ export class CreateEditEndpointComponent selectedType: string; initVal: Endpoint; targetForm: NgForm; - @ViewChild("targetForm") currentForm: NgForm; + @ViewChild("targetForm", {static: false}) currentForm: NgForm; targetEndpoint; testOngoing: boolean; onGoing: boolean; endpointId: number | string; - @ViewChild(InlineAlertComponent) inlineAlert: InlineAlertComponent; + @ViewChild(InlineAlertComponent, {static: false}) inlineAlert: InlineAlertComponent; @Output() reload = new EventEmitter(); diff --git a/src/portal/lib/src/create-edit-label/create-edit-label.component.html b/src/portal/lib/src/create-edit-label/create-edit-label.component.html index ab1f83b9d..92f040130 100644 --- a/src/portal/lib/src/create-edit-label/create-edit-label.component.html +++ b/src/portal/lib/src/create-edit-label/create-edit-label.component.html @@ -3,13 +3,13 @@
- +
- \ No newline at end of file + diff --git a/src/portal/lib/src/project-policy-config/project-policy-config.component.scss b/src/portal/lib/src/project-policy-config/project-policy-config.component.scss index 05d4bc5d1..53759f80c 100644 --- a/src/portal/lib/src/project-policy-config/project-policy-config.component.scss +++ b/src/portal/lib/src/project-policy-config/project-policy-config.component.scss @@ -5,6 +5,7 @@ .select { width: 120px; } + .margin-top-4 { margin-top: 4px; } @@ -14,9 +15,10 @@ border-radius: 3px; padding: 12px; height: 224px; - width: 222px; + width: 270px; color: #0079bb; overflow-y: auto; + li { height: 24px; line-height: 24px; @@ -24,8 +26,8 @@ } } -.width-70per { - width: 70%; +.width-90per { + width: 90%; } .none { @@ -43,16 +45,16 @@ .padding-top-8 { padding-top: 8px; } - -.padding-left-80 { - padding-left: 80px; +.position-relative { + position: relative; } - .add-modal { position: absolute; padding: 0 8px; background-color: rgb(238, 238, 238); - + .flex-direction-column { + flex-direction: column; + } input { width: 100%; border: 1px solid; @@ -63,8 +65,34 @@ } } -.hand{ +.hand { cursor: pointer; margin: 0; } +.config-subtext { + font-size: 0.55rem; + line-height: 1.2rem; + color: rgb(86, 86, 86); + font-weight: 300; +} + +.mt-05 { + margin-bottom: 0.5rem; +} + +.col-flex-grow-0 { + flex-grow: 0; +} + +.expire-data { + min-width: 12.5rem; + margin-top: -1rem; +} + +.bottom-line { + display: flex; + flex-direction: column-reverse; + font-size: 13px; + color: #000; +} diff --git a/src/portal/lib/src/project-policy-config/project-policy-config.component.ts b/src/portal/lib/src/project-policy-config/project-policy-config.component.ts index a6ab59495..1727ad04e 100644 --- a/src/portal/lib/src/project-policy-config/project-policy-config.component.ts +++ b/src/portal/lib/src/project-policy-config/project-policy-config.component.ts @@ -61,9 +61,9 @@ export class ProjectPolicyConfigComponent implements OnInit { @Input() hasSignedIn: boolean; 
@Input() hasProjectAdminRole: boolean; - @ViewChild('cfgConfirmationDialog') confirmationDlg: ConfirmationDialogComponent; - @ViewChild('dateInput') dateInput: ElementRef; - @ViewChild('dateSystemInput') dateSystemInput: ElementRef; + @ViewChild('cfgConfirmationDialog', {static: false}) confirmationDlg: ConfirmationDialogComponent; + @ViewChild('dateInput', {static: false}) dateInput: ElementRef; + @ViewChild('dateSystemInput', {static: false}) dateSystemInput: ElementRef; systemInfo: SystemInfo; orgProjectPolicy = new ProjectPolicy(); diff --git a/src/portal/lib/src/push-image/push-image.component.spec.ts b/src/portal/lib/src/push-image/push-image.component.spec.ts index a9178514b..4605fcb0d 100644 --- a/src/portal/lib/src/push-image/push-image.component.spec.ts +++ b/src/portal/lib/src/push-image/push-image.component.spec.ts @@ -39,7 +39,7 @@ describe('PushImageButtonComponent (inline template)', () => { expect(component).toBeTruthy(); }); - it('should open the drop-down panel', fakeAsync(() => { + it('should open the drop-down panel', () => { fixture.detectChanges(); fixture.whenStable().then(() => { fixture.detectChanges(); @@ -57,6 +57,6 @@ describe('PushImageButtonComponent (inline template)', () => { expect(copyInputs[1].value.trim()).toEqual(`docker push ${component.registryUrl}/${component.projectName}/IMAGE[:TAG]`); }); }); - })); + }); }); diff --git a/src/portal/lib/src/push-image/push-image.component.ts b/src/portal/lib/src/push-image/push-image.component.ts index 0fd21a60a..3c35ae2a8 100644 --- a/src/portal/lib/src/push-image/push-image.component.ts +++ b/src/portal/lib/src/push-image/push-image.component.ts @@ -13,9 +13,9 @@ export class PushImageButtonComponent { @Input() registryUrl: string = "unknown"; @Input() projectName: string = "unknown"; - @ViewChild("tagCopy") tagCopyInput: CopyInputComponent; - @ViewChild("pushCopy") pushCopyInput: CopyInputComponent; - @ViewChild("copyAlert") copyAlert: InlineAlertComponent; + @ViewChild("tagCopy", 
{static: false}) tagCopyInput: CopyInputComponent; + @ViewChild("pushCopy", {static: false}) pushCopyInput: CopyInputComponent; + @ViewChild("copyAlert", {static: false}) copyAlert: InlineAlertComponent; public get tagCommand(): string { return `docker tag SOURCE_IMAGE[:TAG] ${this.registryUrl}/${ diff --git a/src/portal/lib/src/push-image/push-image.scss b/src/portal/lib/src/push-image/push-image.scss index d5707a3d3..74835d8ae 100644 --- a/src/portal/lib/src/push-image/push-image.scss +++ b/src/portal/lib/src/push-image/push-image.scss @@ -32,6 +32,7 @@ .command-input { font-size: 14px; font-weight: 500; + border: 0; } :host>>>.dropdown-menu { diff --git a/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html b/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html index 633f020e6..718d51222 100644 --- a/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html +++ b/src/portal/lib/src/replication/replication-tasks/replication-tasks.component.html @@ -110,7 +110,7 @@ {{t.operation}} {{t.status}} {{t.start_time | date: 'short'}} - {{t.end_time ? (t.end_time | date: 'short') : "-"}} + {{t.end_time && t.end_time != '0001-01-01T00:00:00Z' ? 
(t.end_time | date: 'short') : "-"}} diff --git a/src/portal/lib/src/replication/replication.component.ts b/src/portal/lib/src/replication/replication.component.ts index 1bdbfe200..8079ac3bb 100644 --- a/src/portal/lib/src/replication/replication.component.ts +++ b/src/portal/lib/src/replication/replication.component.ts @@ -120,16 +120,16 @@ export class ReplicationComponent implements OnInit, OnDestroy { jobs: ReplicationJobItem[]; - @ViewChild(ListReplicationRuleComponent) + @ViewChild(ListReplicationRuleComponent, {static: false}) listReplicationRule: ListReplicationRuleComponent; - @ViewChild(CreateEditRuleComponent) + @ViewChild(CreateEditRuleComponent, {static: false}) createEditPolicyComponent: CreateEditRuleComponent; - @ViewChild("replicationConfirmDialog") + @ViewChild("replicationConfirmDialog", {static: false}) replicationConfirmDialog: ConfirmationDialogComponent; - @ViewChild("StopConfirmDialog") + @ViewChild("StopConfirmDialog", {static: false}) StopConfirmDialog: ConfirmationDialogComponent; creationTimeComparator: Comparator = new CustomComparator< diff --git a/src/portal/lib/src/repository-gridview/repository-gridview.component.ts b/src/portal/lib/src/repository-gridview/repository-gridview.component.ts index fe533195b..62bd594ba 100644 --- a/src/portal/lib/src/repository-gridview/repository-gridview.component.ts +++ b/src/portal/lib/src/repository-gridview/repository-gridview.component.ts @@ -80,10 +80,10 @@ export class RepositoryGridviewComponent implements OnChanges, OnInit { totalCount = 0; currentState: State; - @ViewChild("confirmationDialog") + @ViewChild("confirmationDialog", {static: false}) confirmationDialog: ConfirmationDialogComponent; - @ViewChild("gridView") gridView: GridViewComponent; + @ViewChild("gridView", {static: false}) gridView: GridViewComponent; hasCreateRepositoryPermission: boolean; hasDeleteRepositoryPermission: boolean; constructor(@Inject(SERVICE_CONFIG) private configInfo: IServiceConfig, diff --git 
a/src/portal/lib/src/repository/repository.component.html b/src/portal/lib/src/repository/repository.component.html index a5216ed33..c2ef10c67 100644 --- a/src/portal/lib/src/repository/repository.component.html +++ b/src/portal/lib/src/repository/repository.component.html @@ -33,7 +33,7 @@ {{ 'REPOSITORY.MARKDOWN' | translate }} -
+

{{'REPOSITORY.NO_INFO' | translate }}

@@ -42,11 +42,12 @@
- +
- - + +
diff --git a/src/portal/lib/src/repository/repository.component.ts b/src/portal/lib/src/repository/repository.component.ts index 74204ec1e..1ae176e48 100644 --- a/src/portal/lib/src/repository/repository.component.ts +++ b/src/portal/lib/src/repository/repository.component.ts @@ -56,7 +56,7 @@ export class RepositoryComponent implements OnInit { timerHandler: any; - @ViewChild('confirmationDialog') + @ViewChild('confirmationDialog', {static: false}) confirmationDlg: ConfirmationDialogComponent; constructor( diff --git a/src/portal/lib/src/shared/shared.const.ts b/src/portal/lib/src/shared/shared.const.ts index ff1935071..8e76581d9 100644 --- a/src/portal/lib/src/shared/shared.const.ts +++ b/src/portal/lib/src/shared/shared.const.ts @@ -150,6 +150,7 @@ export const PROJECT_ROOTS = [ export enum GroupType { LDAP_TYPE = 1, - HTTP_TYPE = 2 + HTTP_TYPE = 2, + OIDC_TYPE = 3 } export const REFRESH_TIME_DIFFERENCE = 10000; diff --git a/src/portal/lib/src/tag/tag-detail.component.html b/src/portal/lib/src/tag/tag-detail.component.html index 83c053a2f..bee923073 100644 --- a/src/portal/lib/src/tag/tag-detail.component.html +++ b/src/portal/lib/src/tag/tag-detail.component.html @@ -89,8 +89,8 @@ - - + + {{ 'REPOSITORY.BUILD_HISTORY' | translate }} diff --git a/src/portal/lib/src/tag/tag.component.html b/src/portal/lib/src/tag/tag.component.html index 9df2ddad1..445d6d9ef 100644 --- a/src/portal/lib/src/tag/tag.component.html +++ b/src/portal/lib/src/tag/tag.component.html @@ -3,7 +3,7 @@
diff --git a/src/portal/src/app/config/config.component.scss b/src/portal/src/app/config/config.component.scss index 09b65fac6..132f69f2d 100644 --- a/src/portal/src/app/config/config.component.scss +++ b/src/portal/src/app/config/config.component.scss @@ -11,14 +11,22 @@ clr-icon { color: grey; margin-top: -3px; } + clr-icon:hover { color: #007CBB; } +.clr-validate-icon { + color: red; +} + .config-title { display: inline-block; } .tooltip-position { top: -7px; +} +.clr-form-control-disabled { + opacity: 1; } \ No newline at end of file diff --git a/src/portal/src/app/config/config.component.spec.ts b/src/portal/src/app/config/config.component.spec.ts new file mode 100644 index 000000000..b08f679c4 --- /dev/null +++ b/src/portal/src/app/config/config.component.spec.ts @@ -0,0 +1,69 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { SessionService } from '../shared/session.service'; +import { ConfirmationDialogService } from '../shared/confirmation-dialog/confirmation-dialog.service'; +import { MessageHandlerService } from '../shared/message-handler/message-handler.service'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; +import { ClarityModule } from "@clr/angular"; +import { AppConfigService } from '../app-config.service'; +import { ConfigurationService } from './config.service'; +import { ConfigurationComponent } from './config.component'; + +describe('ConfigurationComponent', () => { + let component: ConfigurationComponent; + let fixture: ComponentFixture; + let fakeConfirmationDialogService = { + confirmationConfirm$: { + subscribe: function () { + } + } + }; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot(), + ClarityModule + ], + schemas: [CUSTOM_ELEMENTS_SCHEMA], + declarations: [ConfigurationComponent], + providers: [ + TranslateService, + { + provide: SessionService, useValue: { + 
getCurrentUser: function () { + return "admin"; + } + } + }, + { provide: ConfirmationDialogService, useValue: fakeConfirmationDialogService }, + { provide: MessageHandlerService, useValue: null }, + { + provide: AppConfigService, useValue: { + getConfig: function () { + return { has_ca_root: true }; + } + } + }, + { + provide: ConfigurationService, useValue: { + confirmationConfirm$: { + subscribe: function () { + } + } + } + } + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ConfigurationComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/config/config.component.ts b/src/portal/src/app/config/config.component.ts index 9c0df8f5e..60c727a5c 100644 --- a/src/portal/src/app/config/config.component.ts +++ b/src/portal/src/app/config/config.component.ts @@ -48,9 +48,9 @@ export class ConfigurationComponent implements OnInit, OnDestroy { originalCopy: Configuration = new Configuration(); confirmSub: Subscription; - @ViewChild(SystemSettingsComponent) systemSettingsConfig: SystemSettingsComponent; - @ViewChild(ConfigurationEmailComponent) mailConfig: ConfigurationEmailComponent; - @ViewChild(ConfigurationAuthComponent) authConfig: ConfigurationAuthComponent; + @ViewChild(SystemSettingsComponent, {static: false}) systemSettingsConfig: SystemSettingsComponent; + @ViewChild(ConfigurationEmailComponent, {static: false}) mailConfig: ConfigurationEmailComponent; + @ViewChild(ConfigurationAuthComponent, {static: false}) authConfig: ConfigurationAuthComponent; constructor( private msgHandler: MessageHandlerService, @@ -105,6 +105,7 @@ export class ConfigurationComponent implements OnInit, OnDestroy { ngOnDestroy(): void { if (this.confirmSub) { + console.log(this.confirmSub); this.confirmSub.unsubscribe(); } } diff --git a/src/portal/src/app/config/config.service.spec.ts 
b/src/portal/src/app/config/config.service.spec.ts new file mode 100644 index 000000000..f24d64759 --- /dev/null +++ b/src/portal/src/app/config/config.service.spec.ts @@ -0,0 +1,18 @@ +import { TestBed, inject } from '@angular/core/testing'; +import { HttpClientTestingModule } from '@angular/common/http/testing'; +import { ConfigurationService } from './config.service'; + +describe('ConfigService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + imports: [ + HttpClientTestingModule + ], + providers: [ConfigurationService] + }); + }); + + it('should be created', inject([ConfigurationService], (service: ConfigurationService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/src/portal/src/app/config/email/config-email.component.html b/src/portal/src/app/config/email/config-email.component.html index 0e40f2640..2e8aaa57e 100644 --- a/src/portal/src/app/config/email/config-email.component.html +++ b/src/portal/src/app/config/email/config-email.component.html @@ -1,91 +1,81 @@ -
+
-
+ - -
-
+ + {{'TOOLTIP.ITEM_REQUIRED' | translate}} + + - -
-
+ + {{'TOOLTIP.PORT_REQUIRED' | translate}} + + - -
-
+ + + - -
-
+ + + - +
+ +
+
+ + +
+
-
- - - - - - -
-
- - - - - +
+ +
+
+ + +
+
- diff --git a/src/portal/src/app/config/email/config-email.component.spec.ts b/src/portal/src/app/config/email/config-email.component.spec.ts new file mode 100644 index 000000000..c036a68c3 --- /dev/null +++ b/src/portal/src/app/config/email/config-email.component.spec.ts @@ -0,0 +1,40 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { MessageHandlerService } from '../../shared/message-handler/message-handler.service'; +import { ConfirmMessageHandler } from '../config.msg.utils'; +import { ConfigurationService } from '../config.service'; +import { ConfigurationEmailComponent } from './config-email.component'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; +import { FormsModule } from '@angular/forms'; + +describe('ConfigurationEmailComponent', () => { + let component: ConfigurationEmailComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot(), + FormsModule + ], + declarations: [ConfigurationEmailComponent], + providers: [ + { provide: MessageHandlerService, useValue: null }, + TranslateService, + { provide: ConfirmMessageHandler, useValue: null }, + { provide: ConfigurationService, useValue: null } + ], + schemas: [CUSTOM_ELEMENTS_SCHEMA] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ConfigurationEmailComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/config/email/config-email.component.ts b/src/portal/src/app/config/email/config-email.component.ts index 320e64d5a..427a5e5e6 100644 --- a/src/portal/src/app/config/email/config-email.component.ts +++ b/src/portal/src/app/config/email/config-email.component.ts @@ -31,7 +31,7 @@ export class ConfigurationEmailComponent 
implements OnChanges { private originalConfig: Configuration; testingMailOnGoing = false; onGoing = false; - @ViewChild("mailConfigFrom") mailForm: NgForm; + @ViewChild("mailConfigFrom", {static: true}) mailForm: NgForm; constructor( private msgHandler: MessageHandlerService, diff --git a/src/portal/src/app/dev-center/dev-center.component.spec.ts b/src/portal/src/app/dev-center/dev-center.component.spec.ts index 9878313eb..2a5ebc655 100644 --- a/src/portal/src/app/dev-center/dev-center.component.spec.ts +++ b/src/portal/src/app/dev-center/dev-center.component.spec.ts @@ -1,5 +1,6 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { HttpClientTestingModule } from '@angular/common/http/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; import { DevCenterComponent } from './dev-center.component'; describe('DevCenterComponent', () => { @@ -8,9 +9,16 @@ describe('DevCenterComponent', () => { beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ DevCenterComponent ] + declarations: [DevCenterComponent], + imports: [ + HttpClientTestingModule, + TranslateModule.forRoot() + ], + providers: [ + TranslateService + ], }) - .compileComponents(); + .compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/gc-page/gc-page.component.spec.ts b/src/portal/src/app/gc-page/gc-page.component.spec.ts index a5af4e8f1..6289e5c59 100644 --- a/src/portal/src/app/gc-page/gc-page.component.spec.ts +++ b/src/portal/src/app/gc-page/gc-page.component.spec.ts @@ -1,16 +1,34 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { ClarityModule } from '@clr/angular'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { SessionService } from "../shared/session.service"; import { GcPageComponent } from './gc-page.component'; describe('GcPageComponent', () => { let 
component: GcPageComponent; let fixture: ComponentFixture; - + let fakeSessionService = { + getCurrentUser: function () { + return { has_admin_role: true }; + } + }; beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ GcPageComponent ] + declarations: [GcPageComponent], + schemas: [ + CUSTOM_ELEMENTS_SCHEMA + ], + imports: [ + ClarityModule, + TranslateModule.forRoot() + ], + providers: [ + TranslateService, + { provide: SessionService, useValue: fakeSessionService } + ] }) - .compileComponents(); + .compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/global-message/message.component.spec.ts b/src/portal/src/app/global-message/message.component.spec.ts new file mode 100644 index 000000000..d665b4de8 --- /dev/null +++ b/src/portal/src/app/global-message/message.component.spec.ts @@ -0,0 +1,42 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { Component, Input, OnInit, OnDestroy, ElementRef } from '@angular/core'; +import { Router } from '@angular/router'; +import { Subscription } from "rxjs"; +import { RouterTestingModule } from '@angular/router/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ClarityModule } from "@clr/angular"; +import { Message } from './message'; +import { MessageService } from './message.service'; +import { MessageComponent } from './message.component'; + +describe('MessageComponent', () => { + let component: MessageComponent; + let fixture: ComponentFixture; + let fakeElementRef = null; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + ClarityModule, + RouterTestingModule, + TranslateModule.forRoot() + ], + declarations: [MessageComponent], + providers: [ + MessageService, + TranslateService, + {provide: ElementRef, useValue: fakeElementRef} + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(MessageComponent); + component = 
fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/global-message/message.service.spec.ts b/src/portal/src/app/global-message/message.service.spec.ts new file mode 100644 index 000000000..63ecfd8ff --- /dev/null +++ b/src/portal/src/app/global-message/message.service.spec.ts @@ -0,0 +1,15 @@ +import { TestBed, inject } from '@angular/core/testing'; + +import { MessageService } from './message.service'; + +describe('MessageService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + providers: [MessageService] + }); + }); + + it('should be created', inject([MessageService], (service: MessageService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/src/portal/src/app/group/add-group-modal/add-group-modal.component.html b/src/portal/src/app/group/add-group-modal/add-group-modal.component.html index b3dca6233..d4d0dd7e5 100644 --- a/src/portal/src/app/group/add-group-modal/add-group-modal.component.html +++ b/src/portal/src/app/group/add-group-modal/add-group-modal.component.html @@ -1,53 +1,35 @@ + \ No newline at end of file diff --git a/src/portal/src/app/group/add-group-modal/add-group-modal.component.scss b/src/portal/src/app/group/add-group-modal/add-group-modal.component.scss index e69de29bb..a28413c24 100644 --- a/src/portal/src/app/group/add-group-modal/add-group-modal.component.scss +++ b/src/portal/src/app/group/add-group-modal/add-group-modal.component.scss @@ -0,0 +1,3 @@ +.padding-left-6 { + padding-left: 6px; +} \ No newline at end of file diff --git a/src/portal/src/app/group/add-group-modal/add-group-modal.component.spec.ts b/src/portal/src/app/group/add-group-modal/add-group-modal.component.spec.ts index a26876f21..2979392fe 100644 --- a/src/portal/src/app/group/add-group-modal/add-group-modal.component.spec.ts +++ b/src/portal/src/app/group/add-group-modal/add-group-modal.component.spec.ts @@ -1,16 +1,57 @@ 
import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { ClarityModule } from '@clr/angular'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { CUSTOM_ELEMENTS_SCHEMA, ChangeDetectorRef } from '@angular/core'; +import { FormsModule } from '@angular/forms'; +import { GroupService } from "../group.service"; +import { MessageHandlerService } from "./../../shared/message-handler/message-handler.service"; +import { SessionService } from "./../../shared/session.service"; +import { UserGroup } from "./../group"; +import { AppConfigService } from "../../app-config.service"; import { AddGroupModalComponent } from './add-group-modal.component'; describe('AddGroupModalComponent', () => { let component: AddGroupModalComponent; let fixture: ComponentFixture; + let fakeSessionService = { + getCurrentUser: function () { + return { has_admin_role: true }; + } + }; + let fakeGroupService = null; + let fakeAppConfigService = { + isLdapMode: function () { + return true; + }, + isHttpAuthMode: function () { + return false; + }, + isOidcMode: function () { + return false; + } + }; + let fakeMessageHandlerService = null; beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ AddGroupModalComponent ] + declarations: [AddGroupModalComponent], + imports: [ + ClarityModule, + FormsModule, + TranslateModule.forRoot() + ], + schemas: [ + CUSTOM_ELEMENTS_SCHEMA + ], + providers: [ + ChangeDetectorRef, + { provide: MessageHandlerService, useValue: fakeMessageHandlerService }, + { provide: SessionService, useValue: fakeSessionService }, + { provide: AppConfigService, useValue: fakeAppConfigService }, + { provide: GroupService, useValue: fakeGroupService }, + ] }) - .compileComponents(); + .compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/group/add-group-modal/add-group-modal.component.ts b/src/portal/src/app/group/add-group-modal/add-group-modal.component.ts index 1095aa692..d2e0277f4 
100644 --- a/src/portal/src/app/group/add-group-modal/add-group-modal.component.ts +++ b/src/portal/src/app/group/add-group-modal/add-group-modal.component.ts @@ -25,7 +25,7 @@ export class AddGroupModalComponent implements OnInit, OnDestroy { formChangeSubscription: Subscription; - @ViewChild('groupForm') + @ViewChild('groupForm', { static: true }) groupForm: NgForm; submitted = false; @@ -34,6 +34,7 @@ export class AddGroupModalComponent implements OnInit, OnDestroy { isLdapMode: boolean; isHttpAuthMode: boolean; + isOidcMode: boolean; constructor( private session: SessionService, private msgHandler: MessageHandlerService, @@ -49,21 +50,15 @@ export class AddGroupModalComponent implements OnInit, OnDestroy { if (this.appConfigService.isHttpAuthMode()) { this.isHttpAuthMode = true; } - this.group = new UserGroup(this.isLdapMode ? GroupType.LDAP_TYPE : GroupType.HTTP_TYPE); + if (this.appConfigService.isOidcMode()) { + this.isOidcMode = true; + } + this.group = new UserGroup(this.isLdapMode ? GroupType.LDAP_TYPE : this.isHttpAuthMode ? GroupType.HTTP_TYPE : GroupType.OIDC_TYPE); } ngOnDestroy() { } - public get isDNInvalid(): boolean { - let dnControl = this.groupForm.controls['ldap_group_dn']; - return dnControl && dnControl.invalid && (dnControl.dirty || dnControl.touched); - } - public get isNameInvalid(): boolean { - let dnControl = this.groupForm.controls['group_name']; - return dnControl && dnControl.invalid && (dnControl.dirty || dnControl.touched); - } - public get isFormValid(): boolean { return this.groupForm.valid; } @@ -121,7 +116,7 @@ export class AddGroupModalComponent implements OnInit, OnDestroy { } resetGroup() { - this.group = new UserGroup(this.isLdapMode ? GroupType.LDAP_TYPE : GroupType.HTTP_TYPE); + this.group = new UserGroup(this.isLdapMode ? GroupType.LDAP_TYPE : this.isHttpAuthMode ? 
GroupType.HTTP_TYPE : GroupType.OIDC_TYPE); this.groupForm.reset(); } } diff --git a/src/portal/src/app/group/group.component.spec.ts b/src/portal/src/app/group/group.component.spec.ts index 50601d077..a538ac169 100644 --- a/src/portal/src/app/group/group.component.spec.ts +++ b/src/portal/src/app/group/group.component.spec.ts @@ -1,16 +1,70 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - import { GroupComponent } from './group.component'; +import { ClarityModule } from '@clr/angular'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { FormsModule } from '@angular/forms'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; +import { OperationService } from "@harbor/ui"; +import { SessionService } from "./../shared/session.service"; +import { GroupService } from "./group.service"; +import { of } from "rxjs"; +import { ConfirmationDialogService } from "./../shared/confirmation-dialog/confirmation-dialog.service"; +import { MessageHandlerService } from '../shared/message-handler/message-handler.service'; +import { AppConfigService } from '../app-config.service'; describe('GroupComponent', () => { let component: GroupComponent; let fixture: ComponentFixture; + let fakeMessageHandlerService = null; + let fakeOperationService = null; + let fakeGroupService = { + getUserGroups: function () { + return of([{ + group_name: '' + }, { + group_name: 'abc' + }]); + } + }; + let fakeConfirmationDialogService = { + confirmationConfirm$: of({ + state: 1, + source: 2 + }) + }; + let fakeSessionService = { + currentUser: { + has_admin_role: true + } + }; + let fakeAppConfigService = { + isLdapMode: function () { + return true; + } + }; beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ GroupComponent ] + declarations: [GroupComponent], + imports: [ + ClarityModule, + FormsModule, + TranslateModule.forRoot() + ], + schemas: [ + CUSTOM_ELEMENTS_SCHEMA + ], + providers: [ + TranslateService, + 
{ provide: MessageHandlerService, useValue: fakeMessageHandlerService }, + { provide: OperationService, useValue: fakeOperationService }, + { provide: GroupService, useValue: fakeGroupService }, + { provide: ConfirmationDialogService, useValue: fakeConfirmationDialogService }, + { provide: SessionService, useValue: fakeSessionService }, + { provide: AppConfigService, useValue: fakeAppConfigService } + ] }) - .compileComponents(); + .compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/group/group.component.ts b/src/portal/src/app/group/group.component.ts index 91d2ddc33..80bb4b99e 100644 --- a/src/portal/src/app/group/group.component.ts +++ b/src/portal/src/app/group/group.component.ts @@ -39,7 +39,7 @@ export class GroupComponent implements OnInit, OnDestroy { batchInfos = new Map(); isLdapMode: boolean; - @ViewChild(AddGroupModalComponent) newGroupModal: AddGroupModalComponent; + @ViewChild(AddGroupModalComponent, {static: false}) newGroupModal: AddGroupModalComponent; constructor( private operationService: OperationService, @@ -161,6 +161,8 @@ export class GroupComponent implements OnInit, OnDestroy { return 'GROUP.LDAP_TYPE'; } else if (type === GroupType.HTTP_TYPE) { return 'GROUP.HTTP_TYPE'; + } else if (type === GroupType.OIDC_TYPE) { + return 'GROUP.OIDC_TYPE'; } else { return 'UNKNOWN'; } diff --git a/src/portal/src/app/group/group.service.spec.ts b/src/portal/src/app/group/group.service.spec.ts index 21236835b..d609acc67 100644 --- a/src/portal/src/app/group/group.service.spec.ts +++ b/src/portal/src/app/group/group.service.spec.ts @@ -1,11 +1,14 @@ import { TestBed, inject } from '@angular/core/testing'; - +import { HttpClientTestingModule } from '@angular/common/http/testing'; import { GroupService } from './group.service'; describe('GroupService', () => { beforeEach(() => { TestBed.configureTestingModule({ - providers: [GroupService] + providers: [GroupService], + imports: [ + HttpClientTestingModule + ] }); }); diff --git 
a/src/portal/src/app/harbor-routing.module.ts b/src/portal/src/app/harbor-routing.module.ts index 662b51fe9..df4180392 100644 --- a/src/portal/src/app/harbor-routing.module.ts +++ b/src/portal/src/app/harbor-routing.module.ts @@ -1,13 +1,13 @@ // Copyright Project Harbor Authors // -// Licensed under the Apache License, Version 2.0 (the "License"); +// Licensed under the Apache License, Version 2.0 (the 'License'); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, +// distributed under the License is distributed on an 'AS IS' BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. @@ -18,6 +18,7 @@ import { SystemAdminGuard } from './shared/route/system-admin-activate.service'; import { AuthCheckGuard } from './shared/route/auth-user-activate.service'; import { SignInGuard } from './shared/route/sign-in-guard-activate.service'; import { MemberGuard } from './shared/route/member-guard-activate.service'; +import { MemberPermissionGuard } from './shared/route/member-permission-guard-activate.service'; import { OidcGuard } from './shared/route/oidc-guard-active.service'; import { PageNotFoundComponent } from './shared/not-found/not-found.component'; @@ -50,7 +51,7 @@ import { ProjectDetailComponent } from './project/project-detail/project-detail. 
import { MemberComponent } from './project/member/member.component'; import { RobotAccountComponent } from './project/robot-account/robot-account.component'; import { WebhookComponent } from './project/webhook/webhook.component'; -import { ProjectLabelComponent } from "./project/project-label/project-label.component"; +import { ProjectLabelComponent } from './project/project-label/project-label.component'; import { ProjectConfigComponent } from './project/project-config/project-config.component'; import { ProjectRoutingResolver } from './project/project-routing-resolver.service'; import { ListChartsComponent } from './project/helm-chart/list-charts.component'; @@ -59,8 +60,8 @@ import { HelmChartDetailComponent } from './project/helm-chart/helm-chart-detail import { OidcOnboardComponent } from './oidc-onboard/oidc-onboard.component'; import { LicenseComponent } from './license/license.component'; import { SummaryComponent } from './project/summary/summary.component'; -import { TagRetentionComponent } from "./project/tag-retention/tag-retention.component"; - +import { TagRetentionComponent } from './project/tag-retention/tag-retention.component'; +import { USERSTATICPERMISSION } from '@harbor/ui'; const harborRoutes: Routes = [ { path: '', redirectTo: 'harbor', pathMatch: 'full' }, @@ -81,7 +82,7 @@ const harborRoutes: Routes = [ { path: 'harbor/sign-in', component: SignInComponent, - canActivate: [ SignInGuard] + canActivate: [SignInGuard] }, { path: 'harbor', @@ -117,13 +118,13 @@ const harborRoutes: Routes = [ path: 'replications', component: TotalReplicationPageComponent, canActivate: [SystemAdminGuard], - canActivateChild: [SystemAdminGuard], + canActivateChild: [SystemAdminGuard] }, { path: 'replications/:id/:tasks', component: ReplicationTasksPageComponent, canActivate: [SystemAdminGuard], - canActivateChild: [SystemAdminGuard], + canActivateChild: [SystemAdminGuard] }, { path: 'tags/:id/:repo', @@ -148,7 +149,7 @@ const harborRoutes: Routes = [ canActivate: 
[MemberGuard], resolve: { projectResolver: ProjectRoutingResolver - }, + } }, { path: 'projects/:id/helm-charts/:chart/versions', @@ -156,7 +157,7 @@ const harborRoutes: Routes = [ canActivate: [MemberGuard], resolve: { projectResolver: ProjectRoutingResolver - }, + } }, { path: 'projects/:id/helm-charts/:chart/versions/:version', @@ -164,60 +165,127 @@ const harborRoutes: Routes = [ canActivate: [MemberGuard], resolve: { projectResolver: ProjectRoutingResolver - }, + } }, { path: 'projects/:id', component: ProjectDetailComponent, canActivate: [MemberGuard], + canActivateChild: [MemberPermissionGuard], resolve: { projectResolver: ProjectRoutingResolver }, children: [ { path: 'summary', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.PROJECT.KEY, + action: USERSTATICPERMISSION.PROJECT.VALUE.READ + } + }, component: SummaryComponent }, { path: 'repositories', - component: RepositoryPageComponent + data: { + permissionParam: { + resource: USERSTATICPERMISSION.REPOSITORY.KEY, + action: USERSTATICPERMISSION.REPOSITORY.VALUE.LIST + } + }, + component: RepositoryPageComponent, }, { path: 'helm-charts', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.HELM_CHART.KEY, + action: USERSTATICPERMISSION.HELM_CHART.VALUE.LIST + } + }, component: ListChartsComponent }, { path: 'repositories/:repo/tags', - component: TagRepositoryComponent, + data: { + permissionParam: { + resource: USERSTATICPERMISSION.REPOSITORY.KEY, + action: USERSTATICPERMISSION.REPOSITORY.VALUE.LIST + } + }, + component: TagRepositoryComponent }, { path: 'members', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.MEMBER.KEY, + action: USERSTATICPERMISSION.MEMBER.VALUE.LIST + } + }, component: MemberComponent }, { path: 'logs', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.LOG.KEY, + action: USERSTATICPERMISSION.LOG.VALUE.LIST + } + }, component: AuditLogComponent }, { path: 'labels', + data: { + permissionParam: { + resource: 
USERSTATICPERMISSION.LABEL.KEY, + action: USERSTATICPERMISSION.LABEL.VALUE.CREATE + } + }, component: ProjectLabelComponent }, { path: 'configs', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.CONFIGURATION.KEY, + action: USERSTATICPERMISSION.CONFIGURATION.VALUE.READ + } + }, component: ProjectConfigComponent }, { path: 'robot-account', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.ROBOT.KEY, + action: USERSTATICPERMISSION.ROBOT.VALUE.LIST + } + }, component: RobotAccountComponent }, { path: 'tag-retention', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.TAG_RETENTION.KEY, + action: USERSTATICPERMISSION.TAG_RETENTION.VALUE.READ + } + }, component: TagRetentionComponent }, { path: 'webhook', + data: { + permissionParam: { + resource: USERSTATICPERMISSION.WEBHOOK.KEY, + action: USERSTATICPERMISSION.WEBHOOK.VALUE.LIST + } + }, component: WebhookComponent - }, + } ] }, { @@ -239,19 +307,17 @@ const harborRoutes: Routes = [ path: 'registry', component: DestinationPageComponent, canActivate: [SystemAdminGuard], - canActivateChild: [SystemAdminGuard], + canActivateChild: [SystemAdminGuard] } ] }, - { path: "**", component: PageNotFoundComponent } + { path: '**', component: PageNotFoundComponent } ]; @NgModule({ imports: [ - RouterModule.forRoot(harborRoutes, {onSameUrlNavigation: 'reload'}) + RouterModule.forRoot(harborRoutes, { onSameUrlNavigation: 'reload' }) ], exports: [RouterModule] }) -export class HarborRoutingModule { - -} +export class HarborRoutingModule {} diff --git a/src/portal/src/app/license/license.component.spec.ts b/src/portal/src/app/license/license.component.spec.ts index f1d41ee71..4ac06bfc8 100644 --- a/src/portal/src/app/license/license.component.spec.ts +++ b/src/portal/src/app/license/license.component.spec.ts @@ -1,5 +1,5 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { HttpClientTestingModule } from '@angular/common/http/testing'; import { 
LicenseComponent } from './license.component'; describe('LicenseComponent', () => { @@ -8,9 +8,12 @@ describe('LicenseComponent', () => { beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ LicenseComponent ] + declarations: [LicenseComponent], + imports: [ + HttpClientTestingModule + ] }) - .compileComponents(); + .compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/log/audit-log.component.html b/src/portal/src/app/log/audit-log.component.html index f891de38f..fdcb7e7f5 100644 --- a/src/portal/src/app/log/audit-log.component.html +++ b/src/portal/src/app/log/audit-log.component.html @@ -30,7 +30,7 @@
- + {{'AUDIT_LOG.USERNAME' | translate}} {{'AUDIT_LOG.REPOSITORY_NAME' | translate}} {{'AUDIT_LOG.TAGS' | translate}} diff --git a/src/portal/src/app/log/audit-log.component.spec.ts b/src/portal/src/app/log/audit-log.component.spec.ts new file mode 100644 index 000000000..bbb75581e --- /dev/null +++ b/src/portal/src/app/log/audit-log.component.spec.ts @@ -0,0 +1,30 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { AuditLogComponent } from './audit-log.component'; + +xdescribe('AuditLogComponent', () => { + let component: AuditLogComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule + ], + declarations: [AuditLogComponent], + providers: [ + TranslateService + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(AuditLogComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/log/audit-log.component.ts b/src/portal/src/app/log/audit-log.component.ts index b7b411813..a0ce24d60 100644 --- a/src/portal/src/app/log/audit-log.component.ts +++ b/src/portal/src/app/log/audit-log.component.ts @@ -109,12 +109,9 @@ export class AuditLogComponent implements OnInit { ); } - retrievePage(state: State) { - if (state && state.page) { - this.queryParam.page = Math.ceil((state.page.to + 1) / this.pageSize); - this.currentPage = this.queryParam.page; - this.retrieve(); - } + retrievePage() { + this.queryParam.page = this.currentPage; + this.retrieve(); } doSearchAuditLogs(searchUsername: string): void { diff --git a/src/portal/src/app/log/audit-log.service.spec.ts b/src/portal/src/app/log/audit-log.service.spec.ts new file mode 100644 index 000000000..a4ed2a5d6 --- /dev/null +++ 
b/src/portal/src/app/log/audit-log.service.spec.ts @@ -0,0 +1,18 @@ +import { TestBed, inject } from '@angular/core/testing'; +import { HttpClientTestingModule } from '@angular/common/http/testing'; +import { AuditLogService } from './audit-log.service'; + +describe('AuditLogService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + imports: [ + HttpClientTestingModule + ], + providers: [AuditLogService] + }); + }); + + it('should be created', inject([AuditLogService], (service: AuditLogService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/src/portal/src/app/log/log-page.component.spec.ts b/src/portal/src/app/log/log-page.component.spec.ts new file mode 100644 index 000000000..abbe7f368 --- /dev/null +++ b/src/portal/src/app/log/log-page.component.spec.ts @@ -0,0 +1,29 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { LogPageComponent } from './log-page.component'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; + +describe('LogPageComponent', () => { + let component: LogPageComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + ], + declarations: [LogPageComponent], + providers: [ + ], + schemas: [CUSTOM_ELEMENTS_SCHEMA] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(LogPageComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/oidc-onboard/oidc-onboard.component.spec.ts b/src/portal/src/app/oidc-onboard/oidc-onboard.component.spec.ts index b19288a4f..bd9f55823 100644 --- a/src/portal/src/app/oidc-onboard/oidc-onboard.component.spec.ts +++ b/src/portal/src/app/oidc-onboard/oidc-onboard.component.spec.ts @@ -1,16 +1,48 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { ClarityModule } from 
'@clr/angular'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; +import { OidcOnboardService } from './oidc-onboard.service'; +import { FormsModule, ReactiveFormsModule } from '@angular/forms'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { Router, ActivatedRoute } from '@angular/router'; +import { of } from 'rxjs'; import { OidcOnboardComponent } from './oidc-onboard.component'; describe('OidcOnboardComponent', () => { let component: OidcOnboardComponent; let fixture: ComponentFixture; + let fakeOidcOnboardService = null; + let fakeRouter = null; beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ OidcOnboardComponent ] - }) - .compileComponents(); + declarations: [OidcOnboardComponent], + schemas: [ + CUSTOM_ELEMENTS_SCHEMA + ], + imports: [ + ClarityModule, + FormsModule, + ReactiveFormsModule, + TranslateModule.forRoot() + ], + providers: [ + TranslateService, + { provide: OidcOnboardService, useValue: fakeOidcOnboardService }, + { provide: Router, useValue: fakeRouter }, + { + provide: ActivatedRoute, useValue: { + queryParams: of({ + view: 'abc', + objectId: 'ddd', + actionUid: 'ddd', + targets: '', + locale: '' + }) + } + } + ] + }).compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/oidc-onboard/oidc-onboard.service.spec.ts b/src/portal/src/app/oidc-onboard/oidc-onboard.service.spec.ts index da7e638b4..0148c4f83 100644 --- a/src/portal/src/app/oidc-onboard/oidc-onboard.service.spec.ts +++ b/src/portal/src/app/oidc-onboard/oidc-onboard.service.spec.ts @@ -1,9 +1,17 @@ import { TestBed } from '@angular/core/testing'; - +import { HttpClient } from '@angular/common/http'; +import { HttpClientTestingModule } from '@angular/common/http/testing'; import { OidcOnboardService } from './oidc-onboard.service'; describe('OidcOnboardService', () => { - beforeEach(() => TestBed.configureTestingModule({})); + beforeEach(() => TestBed.configureTestingModule({ + imports: [ + 
HttpClientTestingModule + ], + providers: [ + OidcOnboardService + ] + })); it('should be created', () => { const service: OidcOnboardService = TestBed.get(OidcOnboardService); diff --git a/src/portal/src/app/project/create-project/create-project.component.html b/src/portal/src/app/project/create-project/create-project.component.html index 1d9c8e068..b82fcb4a6 100644 --- a/src/portal/src/app/project/create-project/create-project.component.html +++ b/src/portal/src/app/project/create-project/create-project.component.html @@ -1,89 +1,83 @@ - +
- \ No newline at end of file + diff --git a/src/portal/src/app/project/create-project/create-project.component.spec.ts b/src/portal/src/app/project/create-project/create-project.component.spec.ts new file mode 100644 index 000000000..5b6b8001a --- /dev/null +++ b/src/portal/src/app/project/create-project/create-project.component.spec.ts @@ -0,0 +1,30 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { CreateProjectComponent } from './create-project.component'; + +xdescribe('CreateProjectComponent', () => { + let component: CreateProjectComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [CreateProjectComponent], + providers: [ + TranslateService + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(CreateProjectComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/create-project/create-project.component.ts b/src/portal/src/app/project/create-project/create-project.component.ts index aa662176a..5fd36989a 100644 --- a/src/portal/src/app/project/create-project/create-project.component.ts +++ b/src/portal/src/app/project/create-project/create-project.component.ts @@ -34,8 +34,7 @@ import { InlineAlertComponent } from "../../shared/inline-alert/inline-alert.com import { Project } from "../project"; import { ProjectService, QuotaUnits, QuotaHardInterface, QuotaUnlimited, getByte - , GetIntegerAndUnit, clone, StorageMultipleConstant, validateLimit, validateCountLimit} from "@harbor/ui"; -import { errorHandler } from '@angular/platform-browser/src/browser'; + , GetIntegerAndUnit, clone, validateLimit, validateCountLimit} from "@harbor/ui"; @Component({ 
selector: "create-project", @@ -46,7 +45,7 @@ export class CreateProjectComponent implements OnInit, OnChanges, OnDestroy { projectForm: NgForm; - @ViewChild("projectForm") + @ViewChild("projectForm", {static: true}) currentForm: NgForm; quotaUnits = QuotaUnits; project: Project = new Project(); @@ -74,7 +73,7 @@ export class CreateProjectComponent implements OnInit, OnChanges, OnDestroy { @Output() create = new EventEmitter(); @Input() quotaObj: QuotaHardInterface; @Input() isSystemAdmin: boolean; - @ViewChild(InlineAlertComponent) + @ViewChild(InlineAlertComponent, {static: true}) inlineAlert: InlineAlertComponent; constructor(private projectService: ProjectService, @@ -90,6 +89,7 @@ export class CreateProjectComponent implements OnInit, OnChanges, OnDestroy { this.isNameValid = cont.valid; if (this.isNameValid) { // Check exiting from backend + this.checkOnGoing = true; this.projectService .checkProjectExists(cont.value) .subscribe(() => { diff --git a/src/portal/src/app/project/create-project/create-project.scss b/src/portal/src/app/project/create-project/create-project.scss index ec7820ed9..c8471ed93 100644 --- a/src/portal/src/app/project/create-project/create-project.scss +++ b/src/portal/src/app/project/create-project/create-project.scss @@ -1,47 +1,18 @@ -.form-group-label-override { - font-size: 14px; - font-weight: 400; + +.mr-10 { + margin-right:0.5rem; } -.access-level-label { - font-size: 14px; - font-weight: 400; - margin-left: -4px; - margin-right: 12px; - top: -6px; - position: relative; -} -.modal-height { - height: 15.3em; - overflow-y: hidden; - .form-block > div { - padding-left: 135px; - .input-width { - width: 196px; - } - .public-tooltip { - top: -8px; - left: -8px; - } - .inline-help-public { - margin-left: 5px; - } - } - +.display-flex { + display: flex; + align-items:center; } -.form-group { - ::ng-deep { - clr-select-container { - margin-top: 0.3rem; - } - } - select { - display: inline; - } - .checkbox-inline { - margin-left: 5px; - 
height: 1rem; - } +.pos-inherit { + position: inherit; } - +.clr-error { + .clr-select-wrapper::after { + right: 0.25rem !important; + } +} \ No newline at end of file diff --git a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail.component.spec.ts b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail.component.spec.ts index d9bd3306c..3adc62fbb 100644 --- a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail.component.spec.ts +++ b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail.component.spec.ts @@ -1,16 +1,51 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; +import { ClarityModule } from '@clr/angular'; +import { ActivatedRoute, Router } from "@angular/router"; +import { SessionService } from './../../../shared/session.service'; +import { of } from 'rxjs'; import { HelmChartDetailComponent } from './chart-detail.component'; describe('ChartDetailComponent', () => { let component: HelmChartDetailComponent; let fixture: ComponentFixture; + let fakeRouter = null; + let fakeSessionService = { + getCurrentUser: function () { + return { has_admin_role: true }; + } + }; beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ HelmChartDetailComponent ] - }) - .compileComponents(); + declarations: [HelmChartDetailComponent], + schemas: [ + CUSTOM_ELEMENTS_SCHEMA + ], + imports: [ + ClarityModule, + TranslateModule.forRoot() + ], + providers: [ + { + provide: ActivatedRoute, useValue: { + paramMap: of({ get: (key) => 'value' }), + snapshot: { + params: { id: 1, chart: 'chart', version: 1.0 }, + data: { + projectResolver: { + role_name: 'admin' + } + } + } + } + }, + { provide: Router, useValue: fakeRouter }, + { provide: SessionService, useValue: fakeSessionService }, + TranslateService + ] + }).compileComponents(); })); 
beforeEach(() => { diff --git a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-dependency.component.spec.ts b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-dependency.component.spec.ts new file mode 100644 index 000000000..72567ab3d --- /dev/null +++ b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-dependency.component.spec.ts @@ -0,0 +1,30 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ChartDetailDependencyComponent } from './chart-detail-dependency.component'; + +xdescribe('ChartDetailDependencyComponent', () => { + let component: ChartDetailDependencyComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [ChartDetailDependencyComponent], + providers: [ + TranslateService + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ChartDetailDependencyComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-summary.component.spec.ts b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-summary.component.spec.ts new file mode 100644 index 000000000..e56a347eb --- /dev/null +++ b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-summary.component.spec.ts @@ -0,0 +1,30 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ChartDetailSummaryComponent } from './chart-detail-summary.component'; + +xdescribe('ChartDetailSummaryComponent', () 
=> { + let component: ChartDetailSummaryComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [ChartDetailSummaryComponent], + providers: [ + TranslateService + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ChartDetailSummaryComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-value.component.spec.ts b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-value.component.spec.ts new file mode 100644 index 000000000..a8cc4a6b0 --- /dev/null +++ b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail-value.component.spec.ts @@ -0,0 +1,30 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ChartDetailValueComponent } from './chart-detail-value.component'; + +xdescribe('ChartDetailValueComponent', () => { + let component: ChartDetailValueComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [ChartDetailValueComponent], + providers: [ + TranslateService + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ChartDetailValueComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail.component.spec.ts 
b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail.component.spec.ts new file mode 100644 index 000000000..85ff914bf --- /dev/null +++ b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail.component.spec.ts @@ -0,0 +1,30 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ChartDetailComponent } from './chart-detail.component'; + +xdescribe('ChartDetailComponent', () => { + let component: ChartDetailComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [ChartDetailComponent], + providers: [ + TranslateService + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ChartDetailComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail.component.ts b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail.component.ts index 52f2c0d4f..e3aeb300d 100644 --- a/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail.component.ts +++ b/src/portal/src/app/project/helm-chart/helm-chart-detail/chart-detail/chart-detail.component.ts @@ -44,12 +44,16 @@ export class ChartDetailComponent implements OnInit { ngOnInit(): void { this.systemInfoService.getSystemInfo() .subscribe(systemInfo => { - let scheme = 'http://'; this.systemInfo = systemInfo; - if (this.systemInfo.has_ca_root) { - scheme = 'https://'; + if (this.systemInfo.external_url) { + this.repoURL = `${this.systemInfo.external_url}`; + } else { + let scheme = 'http://'; + if (this.systemInfo.has_ca_root) { + scheme = 'https://'; + } + this.repoURL = 
`${scheme}${this.systemInfo.registry_url}`; } - this.repoURL = `${scheme}${this.systemInfo.registry_url}`; }, error => this.errorHandler.error(error)); this.refresh(); } diff --git a/src/portal/src/app/project/helm-chart/helm-chart.service.spec.ts b/src/portal/src/app/project/helm-chart/helm-chart.service.spec.ts new file mode 100644 index 000000000..3bf486f52 --- /dev/null +++ b/src/portal/src/app/project/helm-chart/helm-chart.service.spec.ts @@ -0,0 +1,15 @@ +import { TestBed, inject } from '@angular/core/testing'; + +import { HelmChartService } from './helm-chart.service'; + +describe('HelmChartService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + providers: [HelmChartService] + }); + }); + + it('should be created', inject([HelmChartService], (service: HelmChartService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/src/portal/src/app/project/helm-chart/label-filter/label-filter.component.spec.ts b/src/portal/src/app/project/helm-chart/label-filter/label-filter.component.spec.ts new file mode 100644 index 000000000..eb1ec2078 --- /dev/null +++ b/src/portal/src/app/project/helm-chart/label-filter/label-filter.component.spec.ts @@ -0,0 +1,30 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { LabelFilterComponent } from './label-filter.component'; + +xdescribe('LabelFilterComponent', () => { + let component: LabelFilterComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [LabelFilterComponent], + providers: [ + TranslateService + ] + }).compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(LabelFilterComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git 
a/src/portal/src/app/project/helm-chart/label-filter/label-filter.component.ts b/src/portal/src/app/project/helm-chart/label-filter/label-filter.component.ts index 28adf7309..f3b79e233 100644 --- a/src/portal/src/app/project/helm-chart/label-filter/label-filter.component.ts +++ b/src/portal/src/app/project/helm-chart/label-filter/label-filter.component.ts @@ -17,7 +17,7 @@ export class LabelFilterComponent implements ClrDatagridFilterInterface, On @Input() labels: Label[] = []; @Input() resourceType: ResourceType; - @ViewChild('filterInput') filterInputRef: ElementRef; + @ViewChild('filterInput', {static: false}) filterInputRef: ElementRef; selectedLabels: Map = new Map(); diff --git a/src/portal/src/app/project/helm-chart/label-marker/label-marker.component.spec.ts b/src/portal/src/app/project/helm-chart/label-marker/label-marker.component.spec.ts new file mode 100644 index 000000000..dc345de2f --- /dev/null +++ b/src/portal/src/app/project/helm-chart/label-marker/label-marker.component.spec.ts @@ -0,0 +1,31 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { LabelMarkerComponent } from './label-marker.component'; + +xdescribe('LabelMarkerComponent', () => { + let component: LabelMarkerComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [LabelMarkerComponent], + providers: [ + TranslateService + ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(LabelMarkerComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/helm-chart/label-marker/label-marker.component.ts b/src/portal/src/app/project/helm-chart/label-marker/label-marker.component.ts index 
d97ff3fb2..107f96826 100644 --- a/src/portal/src/app/project/helm-chart/label-marker/label-marker.component.ts +++ b/src/portal/src/app/project/helm-chart/label-marker/label-marker.component.ts @@ -30,7 +30,7 @@ export class LabelMarkerComponent implements OnInit { labelChangeDebouncer: Subject = new Subject(); - @ViewChild('filterInput') filterInputRef: ElementRef; + @ViewChild('filterInput', {static: false}) filterInputRef: ElementRef; ngOnInit(): void { this.sortedLabels = this.labels; diff --git a/src/portal/src/app/project/helm-chart/list-chart-versions/helm-chart-versions-detail/helm-chart-version.component.spec.ts b/src/portal/src/app/project/helm-chart/list-chart-versions/helm-chart-versions-detail/helm-chart-version.component.spec.ts new file mode 100644 index 000000000..bee621a70 --- /dev/null +++ b/src/portal/src/app/project/helm-chart/list-chart-versions/helm-chart-versions-detail/helm-chart-version.component.spec.ts @@ -0,0 +1,31 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ChartVersionComponent } from './helm-chart-version.component'; + +xdescribe('ChartVersionComponent', () => { + let component: ChartVersionComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [ChartVersionComponent], + providers: [ + TranslateService + ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ChartVersionComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/helm-chart/list-chart-versions/helm-chart-versions-detail/helm-chart-version.component.ts 
b/src/portal/src/app/project/helm-chart/list-chart-versions/helm-chart-versions-detail/helm-chart-version.component.ts index 042d9946f..947be10e6 100644 --- a/src/portal/src/app/project/helm-chart/list-chart-versions/helm-chart-versions-detail/helm-chart-version.component.ts +++ b/src/portal/src/app/project/helm-chart/list-chart-versions/helm-chart-versions-detail/helm-chart-version.component.ts @@ -82,7 +82,7 @@ export class ChartVersionComponent implements OnInit { addLabelHeaders = 'HELM_CHART.ADD_LABEL_TO_CHART_VERSION'; - @ViewChild("confirmationDialog") + @ViewChild("confirmationDialog", {static: false}) confirmationDialog: ConfirmationDialogComponent; hasAddRemoveHelmChartVersionPermission: boolean; hasDownloadHelmChartVersionPermission: boolean; diff --git a/src/portal/src/app/project/helm-chart/list-chart-versions/list-chart-versions.component.spec.ts b/src/portal/src/app/project/helm-chart/list-chart-versions/list-chart-versions.component.spec.ts index 13370331f..56a1dcad6 100644 --- a/src/portal/src/app/project/helm-chart/list-chart-versions/list-chart-versions.component.spec.ts +++ b/src/portal/src/app/project/helm-chart/list-chart-versions/list-chart-versions.component.spec.ts @@ -1,16 +1,49 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { ClarityModule } from '@clr/angular'; +import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ActivatedRoute } from '@angular/router'; +import { Router } from '@angular/router'; +import { of } from 'rxjs'; +import { SessionService } from './../../../shared/session.service'; import { ListChartVersionsComponent } from './list-chart-versions.component'; describe('ListChartVersionsComponent', () => { let component: ListChartVersionsComponent; let fixture: ComponentFixture; + let fakeSessionService = { + getCurrentUser: function () { + return "admin"; + } + }; beforeEach(async(() => { 
TestBed.configureTestingModule({ - declarations: [ ListChartVersionsComponent ] + declarations: [ListChartVersionsComponent], + imports: [ + ClarityModule, + TranslateModule.forRoot() + ], + schemas: [ + CUSTOM_ELEMENTS_SCHEMA + ], + providers: [ + TranslateService, + { + provide: ActivatedRoute, useValue: { + snapshot: { + params: { + id: 1, + chart: 'chart' + } + } + } + }, + { provide: Router, useValue: null }, + { provide: SessionService, useValue: fakeSessionService } + ] }) - .compileComponents(); + .compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.html b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.html index a9d1b75c1..e097b23c3 100644 --- a/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.html +++ b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.html @@ -3,8 +3,8 @@
- + @@ -23,14 +23,16 @@
- - @@ -42,8 +44,7 @@ - + {{ chart.name }} @@ -75,14 +76,13 @@
{{item.total_versions}} - - + +
- {{getStatusString(item) - | translate}} + {{getStatusString(item) | translate}}
@@ -97,37 +97,39 @@ +
\ No newline at end of file diff --git a/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.scss b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.scss index 51acbefa8..68deb5343 100644 --- a/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.scss +++ b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.scss @@ -92,22 +92,18 @@ $size60:60px; } clr-modal { - .form-group { - padding-left: 6rem; + .filename-label { + padding-top: 9px; + } - .filename-label { - padding-top: 9px; - } + .filename-input { + margin-top: 12px; + width: 68%; + } - .filename-input { - margin-top: 12px; - width: 68%; - } - - .file-browser-btn { - margin-left: 15px; - max-width: 25%; - } + .file-browser-btn { + margin-left: 15px; + max-width: 32%; } } @@ -115,4 +111,8 @@ button { clr-icon { margin-right: 6px; } +} + +.mb-10 { + margin-bottom:10px; } \ No newline at end of file diff --git a/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.spec.ts b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.spec.ts new file mode 100644 index 000000000..b26d1dd1d --- /dev/null +++ b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.spec.ts @@ -0,0 +1,31 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { HelmChartComponent } from './helm-chart.component'; + +xdescribe('HelmChartComponent', () => { + let component: HelmChartComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [HelmChartComponent], + providers: [ + TranslateService + ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(HelmChartComponent); + component = fixture.componentInstance; + 
fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.ts b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.ts index 169d4f5b0..17388368f 100644 --- a/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.ts +++ b/src/portal/src/app/project/helm-chart/list-charts-detail/helm-chart.component.ts @@ -68,9 +68,9 @@ export class HelmChartComponent implements OnInit { totalCount = 0; currentState: State; - @ViewChild('chartUploadForm') uploadForm: NgForm; + @ViewChild('chartUploadForm', {static: false}) uploadForm: NgForm; - @ViewChild("confirmationDialog") confirmationDialog: ConfirmationDialogComponent; + @ViewChild("confirmationDialog", {static: false}) confirmationDialog: ConfirmationDialogComponent; hasUploadHelmChartsPermission: boolean; hasDownloadHelmChartsPermission: boolean; hasDeleteHelmChartsPermission: boolean; diff --git a/src/portal/src/app/project/helm-chart/list-charts.component.spec.ts b/src/portal/src/app/project/helm-chart/list-charts.component.spec.ts index 851f53812..b12f3281d 100644 --- a/src/portal/src/app/project/helm-chart/list-charts.component.spec.ts +++ b/src/portal/src/app/project/helm-chart/list-charts.component.spec.ts @@ -1,16 +1,47 @@ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; - +import { ClarityModule } from '@clr/angular'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { CUSTOM_ELEMENTS_SCHEMA, ChangeDetectorRef } from '@angular/core'; +import { ActivatedRoute, Router } from '@angular/router'; +import { SessionService } from './../../shared/session.service'; import { ListChartsComponent } from './list-charts.component'; describe('ListChartsComponent', () => { let component: ListChartsComponent; let fixture: ComponentFixture; + let fakeSessionService = { + getCurrentUser: 
function () { + return "admin"; + } + }; beforeEach(async(() => { TestBed.configureTestingModule({ - declarations: [ ListChartsComponent ] - }) - .compileComponents(); + declarations: [ListChartsComponent], + imports: [ + ClarityModule, + TranslateModule.forRoot() + ], + schemas: [ + CUSTOM_ELEMENTS_SCHEMA + ], + providers: [ + { + provide: ActivatedRoute, useValue: { + snapshot: { + parent: { + params: { + id: 1, + data: 'chart' + } + } + } + } + }, + { provide: Router, useValue: null }, + { provide: SessionService, useValue: fakeSessionService } + ] + }).compileComponents(); })); beforeEach(() => { diff --git a/src/portal/src/app/project/list-project/list-project.component.html b/src/portal/src/app/project/list-project/list-project.component.html index d6d6957f6..255f4bf2a 100644 --- a/src/portal/src/app/project/list-project/list-project.component.html +++ b/src/portal/src/app/project/list-project/list-project.component.html @@ -2,7 +2,7 @@ - diff --git a/src/portal/src/app/project/list-project/list-project.component.spec.ts b/src/portal/src/app/project/list-project/list-project.component.spec.ts new file mode 100644 index 000000000..90c5e3540 --- /dev/null +++ b/src/portal/src/app/project/list-project/list-project.component.spec.ts @@ -0,0 +1,31 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; +import { TranslateModule, TranslateService } from '@ngx-translate/core'; +import { ListProjectComponent } from './list-project.component'; + +xdescribe('ListProjectComponent', () => { + let component: ListProjectComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + TranslateModule.forRoot() + ], + declarations: [ListProjectComponent], + providers: [ + TranslateService + ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ListProjectComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should 
create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/src/app/project/member/add-group/add-group.component.html b/src/portal/src/app/project/member/add-group/add-group.component.html index fa7be3b61..0432901e5 100644 --- a/src/portal/src/app/project/member/add-group/add-group.component.html +++ b/src/portal/src/app/project/member/add-group/add-group.component.html @@ -2,60 +2,63 @@