Merge branch 'master' into feat/gitlab

@@ -20,6 +20,10 @@ matrix:
- go: 1.12.5
env:
- OFFLINE=true
- language: node_js
node_js: 10.16.2
env:
- UI_UT=true
env:
global:
- POSTGRESQL_HOST: localhost
@@ -64,3 +68,4 @@ script:
- if [ "$APITEST_DB" == true ]; then bash ./tests/travis/api_run.sh DB $IP; fi
- if [ "$APITEST_LDAP" == true ]; then bash ./tests/travis/api_run.sh LDAP $IP; fi
- if [ "$OFFLINE" == true ]; then bash ./tests/travis/distro_installer.sh; fi
- if [ "$UI_UT" == true ]; then bash ./tests/travis/ui_ut_run.sh ; fi
@@ -10,7 +10,6 @@ be added to this list as they transition to production deployments.

<a href="https://www.jd.com" border="0" target="_blank"><img alt="JD.com" src="docs/img/jd.png" height="50"></a>
<a href="https://www.trendmicro.com" border="0" target="_blank"><img alt="trendmicro" src="docs/img/trendmicro.png" height="50"></a>
<a href="https://www.onstar.com.cn" border="0" target="_blank"><img alt="OnStar" src="docs/img/onstar.png" height="50"></a>
<a href="https://www.datayes.com" border="0" target="_blank"><img alt="DataYes" src="docs/img/datayes.png" height="50"></a>
<a href="https://www.axatp.com" border="0" target="_blank"><img alt="axatp" src="docs/img/axatp.png" height="50"></a> <br/><br/>
<a href="https://www.360totalsecurity.com/en/" target="_blank" border="0"><img alt="360 Total Security" src="docs/img/360.png" height="50"></a>
Makefile (10 changes)
@@ -81,6 +81,7 @@ CLAIRFLAG=false
HTTPPROXY=
BUILDBIN=false
MIGRATORFLAG=false
NPM_REGISTRY=https://registry.npmjs.org
# enable/disable chart repo support
CHARTFLAG=false
@@ -97,7 +98,7 @@ VERSIONFILENAME=UIVERSION
PREPARE_VERSION_NAME=versions

#versions
REGISTRYVERSION=v2.7.1-patch-2819
REGISTRYVERSION=v2.7.1-patch-2819-2553
NGINXVERSION=$(VERSIONTAG)
NOTARYVERSION=v0.6.1
CLAIRVERSION=v2.0.9
@@ -234,12 +235,14 @@ PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \
$(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
$(HARBORPKG)/prepare \
$(HARBORPKG)/LICENSE $(HARBORPKG)/install.sh \
$(HARBORPKG)/common.sh \
$(HARBORPKG)/harbor.yml

PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
$(HARBORPKG)/prepare \
$(HARBORPKG)/LICENSE \
$(HARBORPKG)/install.sh \
$(HARBORPKG)/common.sh \
$(HARBORPKG)/harbor.yml

DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@@ -304,7 +307,8 @@ build:
-e REGISTRYVERSION=$(REGISTRYVERSION) -e NGINXVERSION=$(NGINXVERSION) -e NOTARYVERSION=$(NOTARYVERSION) -e NOTARYMIGRATEVERSION=$(NOTARYMIGRATEVERSION) \
-e CLAIRVERSION=$(CLAIRVERSION) -e CLAIRDBVERSION=$(CLAIRDBVERSION) -e VERSIONTAG=$(VERSIONTAG) \
-e BUILDBIN=$(BUILDBIN) -e REDISVERSION=$(REDISVERSION) -e MIGRATORVERSION=$(MIGRATORVERSION) \
-e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER)
-e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER) \
-e NPM_REGISTRY=$(NPM_REGISTRY)

install: compile ui_version build prepare start
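The build target now forwards `NPM_REGISTRY` into the portal image build. As a hedged sketch (the mirror URL is a placeholder, not a project default), the override can be passed straight to `make`:

```sh
# Build Harbor images against an npm mirror (registry URL is a placeholder)
make build NPM_REGISTRY=https://registry.npmmirror.com

# The same override also applies to a full install
make install NPM_REGISTRY=https://registry.npmmirror.com
```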

@@ -431,7 +435,7 @@ swagger_client:
mkdir harborclient
java -jar swagger-codegen-cli.jar generate -i docs/swagger.yaml -l python -o harborclient
cd harborclient; python ./setup.py install
pip install docker -q
pip freeze

cleanbinary:
SECURITY.md (new file, 91 lines)
@@ -0,0 +1,91 @@
# Security Release Process
Harbor is a large, growing community devoted to creating a private enterprise-grade registry for all your cloud native assets. The community has adopted this security disclosure and response policy to ensure we responsibly handle critical issues.

## Supported Versions
This section describes the maximum version skew supported between various Harbor releases. Harbor versions are expressed as **x.y.z**, where **x** is the major version, **y** is the minor version, and **z** is the patch version, following [Semantic Versioning terminology](https://semver.org/).

### Support Policy
The Harbor project maintains release branches for the three most recent minor releases. Applicable fixes, including security fixes, may be backported to those three release branches, depending on severity and feasibility. Patch releases are cut from those branches at a regular cadence, or as needed. The Harbor project typically has a minor release approximately every 3 months, maintaining each minor release branch for approximately 9 months.

There is no mandated timeline for major versions and there are currently no criteria for shipping a new major version (e.g. Harbor 2.0.0).

### Minor Release Support Matrix
| Version | Supported |
| ------- | ------------------ |
| Harbor v1.7.x | :white_check_mark: |
| Harbor v1.8.x | :white_check_mark: |
| Harbor v1.9.x | :white_check_mark: |

## Reporting a Vulnerability - Private Disclosure Process
Security is of the highest importance and all security vulnerabilities or suspected security vulnerabilities should be reported to Harbor privately, to minimize attacks against current users of Harbor before they are fixed. Vulnerabilities will be investigated and patched in the next patch (or minor) release as soon as possible. This information could be kept entirely internal to the project.

If you know of a publicly disclosed security vulnerability for Harbor, please **IMMEDIATELY** contact cncf-harbor-security@lists.cncf.io to inform the Harbor Security Team.

**IMPORTANT: Do not file public issues on GitHub for security vulnerabilities.**

To report a vulnerability or a security-related issue, please email the private address cncf-harbor-security@lists.cncf.io with the details of the vulnerability. The email will be fielded by the Harbor Security Team, which is made up of Harbor maintainers who have committer and release permissions. Emails will be addressed within 3 business days, including a detailed plan to investigate the issue and any potential workarounds to perform in the meantime. Do not report non-security-impacting bugs through this channel. Use [GitHub issues](https://github.com/goharbor/harbor/issues/new/choose) instead.

### Proposed Email Content
Provide a descriptive subject line and in the body of the email include the following information:
* Basic identity information, such as your name and your affiliation or company.
* Detailed steps to reproduce the vulnerability (POC scripts, screenshots, and compressed packet captures are all helpful to us).
* Description of the effects of the vulnerability on Harbor and the related hardware and software configurations, so that the Harbor Security Team can reproduce it.
* How the vulnerability affects Harbor usage and an estimation of the attack surface, if there is one.
* List other projects or dependencies that were used in conjunction with Harbor to produce the vulnerability.

## When to Report a Vulnerability
* When you think Harbor has a potential security vulnerability.
* When you suspect a potential vulnerability but you are unsure whether it impacts Harbor.
* When you know of or suspect a potential vulnerability in another project that is used by Harbor. For example, Harbor has dependencies on Docker, PostgreSQL, Redis, Notary, Clair, etc.

## Patch, Release, and Disclosure
The Harbor Security Team will respond to vulnerability reports as follows:

1. The Security Team will investigate the vulnerability and determine its effects and criticality.
2. If the issue is not deemed to be a vulnerability, the Security Team will follow up with a detailed reason for rejection.
3. The Security Team will initiate a conversation with the reporter within 3 business days.
4. If a vulnerability is acknowledged and the timeline for a fix is determined, the Security Team will work on a plan to communicate with the appropriate community, including identifying mitigating steps that affected users can take to protect themselves until the fix is rolled out.
5. The Security Team will also create a [CVSS](https://www.first.org/cvss/specification-document) score using the [CVSS Calculator](https://www.first.org/cvss/calculator/3.0). The Security Team makes the final call on the calculated CVSS; it is better to move quickly than to make the CVSS perfect. Issues may also be reported to [Mitre](https://cve.mitre.org/) using this [scoring calculator](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator). The CVE will initially be set to private.
6. The Security Team will work on fixing the vulnerability and perform internal testing before preparing to roll out the fix.
7. The Security Team will provide early disclosure of the vulnerability by emailing the cncf-harbor-distributors-announce@lists.cncf.io mailing list. Distributors can initially plan for the vulnerability patch ahead of the fix, and later can test the fix and provide feedback to the Harbor team. See the section **Early Disclosure to Harbor Distributors List** for details about how to join this mailing list.
8. A public disclosure date is negotiated by the Harbor Security Team, the bug submitter, and the distributors list. We prefer to fully disclose the bug as soon as possible once a user mitigation or patch is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for distributor coordination. The timeframe for disclosure is from immediate (especially if it’s already publicly known) to a few weeks. For a critical vulnerability with a straightforward mitigation, we expect the interval from report date to public disclosure date to be on the order of 14 business days. The Harbor Security Team holds the final say when setting a public disclosure date.
9. Once the fix is confirmed, the Security Team will patch the vulnerability in the next patch or minor release, and backport a patch release into all earlier supported releases. Upon release of the patched version of Harbor, we will follow the **Public Disclosure Process**.

### Public Disclosure Process
The Security Team publishes a public [advisory](https://github.com/goharbor/harbor/security/advisories) to the Harbor community via GitHub. In most cases, additional communication via Slack, Twitter, CNCF lists, blog and other channels will assist in educating Harbor users and rolling out the patched release to affected users.

The Security Team will also publish any mitigating steps users can take until the fix can be applied to their Harbor instances. Harbor distributors will handle creating and publishing their own security advisories.

## Mailing lists
- Use cncf-harbor-security@lists.cncf.io to report security concerns to the Harbor Security Team, who uses the list to privately discuss security issues and fixes prior to disclosure.
- Join cncf-harbor-distributors-announce@lists.cncf.io for early private information and vulnerability disclosure. Early disclosure may include mitigating steps and additional information on security patch releases. See below for information on how Harbor distributors or vendors can apply to join this list.

## Early Disclosure to Harbor Distributors List
This private list is intended to be used primarily to provide actionable information to multiple distributor projects at once. This list is not intended to inform individuals about security issues.

### Membership Criteria
To be eligible to join the cncf-harbor-distributors-announce@lists.cncf.io mailing list, you should:
1. Be an active distributor of Harbor.
2. Have a user base that is not limited to your own organization.
3. Have a publicly verifiable track record up to the present day of fixing security issues.
4. Not be a downstream or rebuild of another distributor.
5. Be a participant and active contributor in the Harbor community.
6. Accept the Embargo Policy that is outlined below.
7. Have someone who is already on the list vouch for the person requesting membership on behalf of your distribution.

**The terms and conditions of the Embargo Policy apply to all members of this mailing list. A request for membership represents your acceptance of the terms and conditions of the Embargo Policy.**

### Embargo Policy
The information that members receive on cncf-harbor-distributors-announce@lists.cncf.io must not be made public, shared, or even hinted at anywhere beyond those who need to know within your specific team, unless you receive explicit approval to do so from the Harbor Security Team. This remains true until the public disclosure date/time agreed upon by the list. Members of the list and others cannot use the information for any reason other than getting the issue fixed for their respective distribution's users.
Before you share any information from the list with members of your team who are required to fix the issue, these team members must agree to the same terms, and only be provided with information on a need-to-know basis.

In the unfortunate event that you share information beyond what is permitted by this policy, you must urgently inform the cncf-harbor-security@lists.cncf.io mailing list of exactly what information was leaked and to whom. If you continue to leak information and break the policy outlined here, you will be permanently removed from the list.

### Requesting to Join
Send new membership requests to cncf-harbor-security@lists.cncf.io.
In the body of your request please specify how you qualify for membership and fulfill each criterion listed in the Membership Criteria section above.

## Confidentiality, Integrity and Availability
We consider vulnerabilities leading to the compromise of data confidentiality, elevation of privilege, or integrity to be our highest priority concerns. Availability, in particular in areas relating to DoS and resource exhaustion, is also a serious security concern. The Harbor Security Team takes all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities seriously and will investigate them in an urgent and expeditious manner.

Note that we do not currently consider the default settings for Harbor to be secure-by-default. It is necessary for operators to explicitly configure settings, role-based access control, and other resource-related features in Harbor to provide a hardened Harbor environment. We will not act on any security disclosure that relates to a lack of safe defaults. Over time, we will work towards improved safe-by-default configuration, taking into account backwards compatibility.
@@ -10,11 +10,10 @@ Open the `setting.json` file, you'll see the default content as shown below:
"headerBgColor": "#004a70",
"headerLogo": "",
"loginBgImg": "",
"appTitle": "",
"product": {
"title": "Harbor",
"company": "goharbor",
"name": "Harbor",
"introductions": {
"introduction": {
"zh-cn": "",
"es-es": "",
"en-us": ""
BIN  docs/img/cve-whitelist1.png (new file, 55 KiB)
BIN  docs/img/cve-whitelist2.png (new file, 24 KiB)
BIN  docs/img/cve-whitelist3.png (new file, 10 KiB)
BIN  docs/img/cve-whitelist4.png (new file, 4.8 KiB)
BIN  docs/img/cve-whitelist5.png (new file, 82 KiB)
BIN  docs/img/cve-whitelist6.png (new file, 14 KiB)
BIN  (an existing image removed; was 37 KiB)
BIN  docs/img/project-quota1.png (new file, 84 KiB)
BIN  docs/img/project-quota2.png (new file, 15 KiB)
BIN  docs/img/project-quota3.png (new file, 17 KiB)
BIN  docs/img/project-quota4.png (new file, 8.7 KiB)
BIN  docs/img/project-quota5.png (new file, 9.9 KiB)
BIN  docs/img/replication-endpoint1.png (new file, 33 KiB)
BIN  docs/img/replication-endpoint2.png (new file, 68 KiB)
BIN  (an existing image updated, 44 KiB -> 19 KiB)
BIN  docs/img/tag-retention1.png (new file, 13 KiB)
BIN  docs/img/tag-retention2.png (new file, 38 KiB)
BIN  docs/img/tag-retention3.png (new file, 44 KiB)
BIN  docs/img/tag-retention4.png (new file, 5.7 KiB)
BIN  docs/img/tag-retention5.png (new file, 15 KiB)
BIN  docs/img/webhooks1.png (new file, 13 KiB)
BIN  docs/img/webhooks2.png (new file, 23 KiB)
BIN  docs/img/webhooks3.png (new file, 56 KiB)
BIN  docs/img/webhooks4.png (new file, 2.9 KiB)
@@ -100,19 +100,24 @@ The parameters are described below - note that at the very least, you will need

- **harbor_admin_password**: The administrator's initial password. This password only takes effect for the first time Harbor launches. After that, this setting is ignored and the administrator's password should be set in the Portal. _Note that the default username/password are **admin/Harbor12345**._

- **database**: the configs related to the local database
- **password**: The root password for the PostgreSQL database used for **db_auth**. _Change this password for any production use!_
- **password**: The root password for the PostgreSQL database. Change this password for any production use.
- **max_idle_conns**: The maximum number of connections in the idle connection pool. If <=0 no idle connections are retained. The default value is 50 and if it is not configured the value is 2.
- **max_open_conns**: The maximum number of open connections to the database. If <= 0 there is no limit on the number of open connections. The default value is 100 for the max connections to the Harbor database. If it is not configured the value is 0.

- **jobservice**: jobservice related service
- **max_job_workers**: The maximum number of replication workers in the job service. For each image replication job, a worker synchronizes all tags of a repository to the remote destination. Increasing this number allows more concurrent replication jobs in the system. However, since each worker consumes a certain amount of network/CPU/IO resources, please carefully pick the value of this attribute based on the hardware resources of the host.
- **log**: log related url
- **level**: log level, options are debug, info, warning, error, fatal
- **rotate_count**: Log files are rotated **rotate_count** times before being removed. If count is 0, old versions are removed rather than rotated.
- **rotate_size**: Log files are rotated only if they grow bigger than **rotate_size** bytes. If size is followed by k, the size is assumed to be in kilobytes. If M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G are all valid.
- **location**: the directory to store log

- **local**: The default is to retain logs locally (a configuration sketch follows this list).
- **rotate_count**: Log files are rotated **rotate_count** times before being removed. If count is 0, old versions are removed rather than rotated.
- **rotate_size**: Log files are rotated only if they grow bigger than **rotate_size** bytes. If size is followed by k, the size is assumed to be in kilobytes. If M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G are all valid.
- **location**: the directory to store logs
- **external_endpoint**: Enable this option to forward logs to a syslog server.
- **protocol**: Transport protocol for the syslog server. Default is TCP.
- **host**: The URL of the syslog server.
- **port**: The port on which the syslog server listens.
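Pulling the log options above together, a minimal sketch of the `log` section in `harbor.yml` (the values are illustrative, not shipped defaults):

```yaml
log:
  level: info
  local:
    rotate_count: 50          # keep up to 50 rotated files
    rotate_size: 200M         # rotate once a file exceeds 200 MB
    location: /var/log/harbor
  # external_endpoint:        # optionally forward logs to a syslog server instead
  #   protocol: tcp
  #   host: log.example.com
  #   port: 514
```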

##### optional parameters

- **http**:
@@ -143,6 +148,8 @@ refer to **[Configuring Harbor with HTTPS Access](configure_https.md)**.
- **username**: username used to connect to the Harbor core database
- **password**: password for the Harbor core database
- **ssl_mode**: whether to enable SSL mode (a configuration sketch follows this list)
- **max_idle_conns**: The maximum number of connections in the idle connection pool. If <=0 no idle connections are retained. The default value is 2.
- **max_open_conns**: The maximum number of open connections to the database. If <= 0 there is no limit on the number of open connections. The default value is 0.
- **clair**: clair's database configs
- **host**: hostname for clair database
- **port**: port of clair database
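Assuming the external database layout these options describe, the section might be filled in as follows (hostnames and credentials are placeholders):

```yaml
external_database:
  harbor:
    host: db.example.com      # placeholder
    port: 5432
    db_name: registry
    username: harbor
    password: change-me       # placeholder
    ssl_mode: disable
    max_idle_conns: 2
    max_open_conns: 0
  clair:
    host: db.example.com
    port: 5432
    db_name: clair
    username: clair
    password: change-me
    ssl_mode: disable
```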

@@ -1,91 +1,99 @@
# Harbor upgrade and migration guide
# Harbor Upgrade and Migration Guide

This guide only covers upgrade and migration to version >= v1.8.0
This guide covers upgrade and migration to version 1.9.0, and only covers migration from v1.7.x and later to the current version. If you are upgrading from an earlier version, refer to the migration guide in the `release-1.7.0` branch to upgrade to v1.7.x first, then follow this guide to perform the migration to this version.

When upgrading your existing Harbor instance to a newer version, you may need to migrate the data in your database and the settings in `harbor.cfg`.
Since the migration may alter the database schema and the settings of `harbor.cfg`, you should **always** back up your data before any migration.
When upgrading an existing Harbor 1.7.x instance to a newer version, you might need to migrate the data in your database and the settings in `harbor.cfg`.
Since the migration might alter the database schema and the settings of `harbor.cfg`, you should **always** back up your data before any migration.

**NOTE:**
**NOTES:**

- Again, you must back up your data before any data migration.
- Since v1.8.0, the configuration of Harbor has changed to a `.yml` file. If you are upgrading from 1.7.x, the migrator will transform the configuration file from `harbor.cfg` to `harbor.yml`. The command to perform this migration is a little different, so make sure you follow the steps below.
- In version 1.9.0, some containers are started by `non-root`. This does not pose problems if you are upgrading an officially released version of Harbor, but if you have deployed a customized instance of Harbor, you might encounter permission issues.
- In previous releases, user roles took precedence over group roles in a project. In this version, user roles and group roles are combined so that the user has whichever set of permissions is highest. This might cause the roles of certain users to change during upgrade.
- With the introduction of storage and artifact quotas in version 1.9.0, migration from 1.7.x and 1.8.x might take a few minutes. This is because the `core` walks through all blobs in the registry and populates the database with information about the layers and artifacts in projects.
- With the introduction of storage and artifact quotas in version 1.9.0, replication between version 1.9.0 and a previous version of Harbor does not work. You must upgrade all Harbor nodes to 1.9.0 if you have configured replication between them.

- This guide only covers the migration from v1.6.0 to the current version; if you are upgrading from an earlier version, please refer to the migration guide in the release branch to upgrade to v1.6.0, then follow this guide to do the migration to a later version.

- From v1.6.0 on, Harbor will automatically try to migrate the DB schema when it starts, so if you are upgrading from v1.6.0 or above it is not necessary to call the migrator tool to migrate the schema.

- For the change in database schema please refer to the [change log](../tools/migration/db/changelog.md).

- Since v1.8.0, the configuration of Harbor has changed to a `.yml` file; the migrator will transform the configuration file from `harbor.cfg` to `harbor.yml`. The command to perform this migration is a little different, so please make sure you follow the steps below.

### Upgrading Harbor and migrating data
## Upgrading Harbor and Migrating Data

1. Log in to the host that Harbor runs on, and stop and remove the existing Harbor instance if it is still running:

```sh
cd harbor
docker-compose down
```

2. Back up Harbor's current files so that you can roll back to the current version if necessary.

```sh
mv harbor /my_backup_dir/harbor
```

Back up the database (by default in the directory `/data/database`):

```sh
cp -r /data/database /my_backup_dir/
```

3. Get the latest Harbor release package from GitHub:
[https://github.com/goharbor/harbor/releases](https://github.com/goharbor/harbor/releases)

4. Before upgrading Harbor, perform a migration first. The migration tool is delivered as a docker image, so you should pull the image from docker hub. Replace [tag] with the release version of Harbor (for example, v1.9.0) in the command below:

```sh
docker pull goharbor/harbor-migrator:[tag]
```

5. Upgrade from `harbor.cfg` to `harbor.yml`
5. If your current version is v1.7.x or earlier, migrate the configuration file from `harbor.cfg` to `harbor.yml`.

**NOTE:** You can find the ${harbor_yml} in the extracted installer you got in step `3`; after the migration, the file `harbor.yml`
in that path will be updated with the values from ${harbor_cfg}

```sh
docker run -it --rm -v ${harbor_cfg}:/harbor-migration/harbor-cfg/harbor.yml -v ${harbor_yml}:/harbor-migration/harbor-cfg-out/harbor.yml goharbor/harbor-migrator:[tag] --cfg up
```

Otherwise, if your version is 1.8.x or higher, just upgrade the `harbor.yml` file:

```sh
docker run -it --rm -v ${harbor_yml}:/harbor-migration/harbor-cfg/harbor.yml goharbor/harbor-migrator:[tag] --cfg up
```

**NOTE:** The schema upgrade and data migration of the database are performed by core when Harbor starts; if the migration fails, please check the log of core to debug.

6. Under the directory `./harbor`, run the `./install.sh` script to install the new Harbor instance. If you choose to install Harbor with components such as Notary, Clair, and chartmuseum, refer to the [Installation & Configuration Guide](../docs/installation_guide.md) for more information.

### Roll back from an upgrade
## Roll Back from an Upgrade

If, for any reason, you want to roll back to the previous version of Harbor, perform the following steps:

**NOTE:** Roll back does not support upgrades across v1.5.0, such as from v1.2.0 to v1.7.0. This is because Harbor changed its database to PostgreSQL from v1.7.0; the migrator cannot roll back data to MariaDB.

1. Stop and remove the current Harbor service if it is still running.

```sh
cd harbor
docker-compose down
```

2. Remove the current Harbor instance.

```sh
rm -rf harbor
```

3. Restore the older version package of Harbor.

```sh
mv /my_backup_dir/harbor harbor
```

4. Restore the database: copy the data files from the backup directory to your data volume, by default `/data/database`.

5. Restart the Harbor service using the previous configuration.
If the previous version of Harbor was installed by a release build:

```sh
cd harbor
./install.sh
```

**NOTE**: While you can roll back an upgrade to the state before you started the upgrade, Harbor does not support downgrades.

@@ -43,3 +43,11 @@ The following table depicts the various user permission levels in a project.
| Add/Remove labels of helm chart version | | ✓ | ✓ | ✓ |
| See a list of project robots | | | ✓ | ✓ |
| Create/edit/delete project robots | | | | ✓ |
| See configured CVE whitelist | ✓ | ✓ | ✓ | ✓ |
| Create/edit/remove CVE whitelist | | | | ✓ |
| Enable/disable webhooks | | ✓ | ✓ | ✓ |
| Create/delete tag retention rules | | ✓ | ✓ | ✓ |
| Enable/disable tag retention rules | | ✓ | ✓ | ✓ |
| See project quotas | ✓ | ✓ | ✓ | ✓ |
| Edit project quotas | | | | |

@@ -774,20 +774,15 @@ paths:
description: Internal errors.
/users/search:
get:
summary: Search users by username, email
summary: Search users by username
description: |
This endpoint is to search the users by username, email.
This endpoint is to search the users by username.
parameters:
- name: username
in: query
type: string
required: false
required: true
description: Username for filtering results.
- name: email
in: query
type: string
required: false
description: Email for filtering results.
- name: page
in: query
type: integer
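With `email` gone and `username` now required, a quick sanity check of the reworked endpoint might look like this (host and credentials are placeholders):

```sh
# Search users by username on Harbor 1.9 (placeholder host/credentials)
curl -s -u admin:Harbor12345 \
  "https://harbor.example.com/api/users/search?username=alice&page=1&page_size=10"
```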
@@ -964,13 +959,13 @@ paths:
description: User ID does not exist.
'500':
description: Unexpected internal errors.
'/users/{user_id}/gen_cli_secret':
post:
summary: Generate new CLI secret for a user.
'/users/{user_id}/cli_secret':
put:
summary: Set CLI secret for a user.
description: |
This endpoint lets a user generate a new CLI secret for themselves. This API only works when the auth mode is set to 'OIDC'.
Once this API returns with successful status, the old secret will be invalid, as there will be only one CLI secret
for a user. The new secret will be returned in the response.
for a user.
parameters:
- name: user_id
in: path
@@ -978,19 +973,23 @@ paths:
format: int
required: true
description: User ID
tags:
- Products
responses:
'200':
description: The secret is successfully generated.
- name: input_secret
in: body
description: JSON object that includes the new secret
required: true
schema:
type: object
properties:
secret:
type: string
description: The new secret
tags:
- Products
responses:
'200':
description: The secret is successfully updated
'400':
description: Invalid user ID. Or user is not onboarded via OIDC authentication.
description: Invalid user ID. Or user is not onboarded via OIDC authentication. Or the secret does not meet the standard.
'401':
description: User needs to log in first.
'403':
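Since the endpoint changed from server-side generation (`POST .../gen_cli_secret`) to client-supplied values (`PUT .../cli_secret`), a hedged sketch of the new call (host, user ID, and secret are placeholders):

```sh
# Set the CLI secret for user 123 on an OIDC-backed Harbor (placeholder values)
curl -s -u admin:Harbor12345 -X PUT \
  -H "Content-Type: application/json" \
  -d '{"secret": "MyNewCliSecret1"}' \
  "https://harbor.example.com/api/users/123/cli_secret"
```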
@@ -2415,7 +2414,7 @@ paths:
description: |
This endpoint is for syncing quota usage of registry/chart with database.
tags:
- Products
responses:
'200':
description: Sync repositories successfully.
@@ -2423,6 +2422,28 @@
description: User needs to log in first.
'403':
description: User does not have permission of system admin role.
/internal/switchquota:
put:
summary: Enable or disable quota.
description: |
This endpoint enables or disables quota. When quota is disabled, no resources are required or released during image/chart push and delete operations.
tags:
- Products
parameters:
- name: switcher
in: body
required: true
schema:
$ref: '#/definitions/QuotaSwitcher'
responses:
'200':
description: Enable/Disable quota successfully.
'401':
description: User needs to log in first.
'403':
description: User does not have permission of system admin role.
'500':
description: Unexpected internal errors.
/systeminfo:
get:
summary: Get general system info
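A sketch of toggling the new switch as a system admin (host and credentials are placeholders; the body mirrors the `QuotaSwitcher` definition added later in the spec):

```sh
# Disable quota enforcement system-wide (placeholder host/credentials)
curl -s -u admin:Harbor12345 -X PUT \
  -H "Content-Type: application/json" \
  -d '{"enabled": false}' \
  "https://harbor.example.com/api/internal/switchquota"
```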
@@ -3600,7 +3621,6 @@ paths:
description: List quotas
tags:
- Products
- Quota
parameters:
- name: reference
in: query
@@ -3968,7 +3988,124 @@ paths:
description: User has no permission to list webhook jobs of the project.
'500':
description: Unexpected internal errors.

'/projects/{project_id}/immutabletagrules':
get:
summary: List all immutable tag rules of the current project
description: |
This endpoint returns the immutable tag rules of a project
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID.
tags:
- Products
responses:
'200':
description: List project immutable tag rules successfully.
schema:
type: array
items:
$ref: '#/definitions/ImmutableTagRule'
'400':
description: Illegal format of provided ID value.
'401':
description: User needs to log in first.
'403':
description: User has no permission to list immutable tag rules of the project.
'500':
description: Unexpected internal errors.
post:
summary: Add an immutable tag rule to the current project
description: |
This endpoint adds an immutable tag rule to the project
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID.
- name: immutabletagrule
in: body
schema:
$ref: '#/definitions/ImmutableTagRule'
tags:
- Products
responses:
'200':
description: Add the immutable tag rule successfully.
'400':
description: Illegal format of provided ID value.
'401':
description: User needs to log in first.
'403':
description: User has no permission to get immutable tag rule of the project.
'500':
description: Internal server errors.
'/projects/{project_id}/immutabletagrules/{id}':
put:
summary: Update the immutable tag rule or enable or disable the rule
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID.
- name: id
in: path
type: integer
format: int64
required: true
description: Immutable tag rule ID.
- name: immutabletagrule
in: body
schema:
$ref: '#/definitions/ImmutableTagRule'
tags:
- Products
responses:
'200':
description: Update the immutable tag rule successfully.
'400':
description: Illegal format of provided ID value.
'401':
description: User needs to log in first.
'403':
description: User has no permission to update the immutable tag rule of the project.
'500':
description: Internal server errors.
delete:
summary: Delete the immutable tag rule.
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID.
- name: id
in: path
type: integer
format: int64
required: true
description: Immutable tag rule ID.
tags:
- Products
responses:
'200':
description: Delete the immutable tag rule successfully.
'400':
description: Illegal format of provided ID value.
'401':
description: User needs to log in first.
'403':
description: User has no permission to delete immutable tags of the project.
'500':
description: Internal server errors.
'/retentions/metadatas':
get:
summary: Get Retention Metadatas
@@ -6248,4 +6385,22 @@ definitions:
type: integer
retained:
type: integer

QuotaSwitcher:
type: object
properties:
enabled:
type: boolean
description: Whether quota is enabled or disabled
ImmutableTagRule:
type: object
properties:
id:
type: integer
format: int64
project_id:
type: integer
format: int64
tag_filter:
type: string
enabled:
type: boolean
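To make the new rule API concrete, a hedged sketch of creating and listing a rule that protects release tags (host, credentials, project ID, and the filter value are illustrative assumptions):

```sh
# Create an immutable tag rule in project 1 (placeholder values)
curl -s -u admin:Harbor12345 -X POST \
  -H "Content-Type: application/json" \
  -d '{"tag_filter": "release-*", "enabled": true}' \
  "https://harbor.example.com/api/projects/1/immutabletagrules"

# List the project's rules to confirm
curl -s -u admin:Harbor12345 \
  "https://harbor.example.com/api/projects/1/immutabletagrules"
```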
docs/user_guide.md (1849 changes)
make/checkenv.sh (131 changes)
@@ -1,50 +1,6 @@
#/bin/bash
#!/bin/bash

#docker version: 1.11.2
#docker-compose version: 1.7.1
#Harbor version: 0.4.5+
set +e
set -o noglob

#
# Set Colors
#

bold=$(tput bold)
underline=$(tput sgr 0 1)
reset=$(tput sgr0)

red=$(tput setaf 1)
green=$(tput setaf 76)
white=$(tput setaf 7)
tan=$(tput setaf 202)
blue=$(tput setaf 25)

#
# Headers and Logging
#

underline() { printf "${underline}${bold}%s${reset}\n" "$@"
}
h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@"
}
h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@"
}
debug() { printf "${white}%s${reset}\n" "$@"
}
info() { printf "${white}➜ %s${reset}\n" "$@"
}
success() { printf "${green}✔ %s${reset}\n" "$@"
}
error() { printf "${red}✖ %s${reset}\n" "$@"
}
warn() { printf "${tan}➜ %s${reset}\n" "$@"
}
bold() { printf "${bold}%s${reset}\n" "$@"
}
note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@"
}

set -e

usage=$'Checking environment for harbor build and install. Includes golang, docker and docker-compose.'
@@ -61,89 +17,8 @@ while [ $# -gt 0 ]; do
shift || true
done

function check_golang {
if ! go version &> /dev/null
then
warn "No golang package in your environment. You should use golang docker image build binary."
return
fi

# golang has been installed; check its version
if [[ $(go version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
golang_version=${BASH_REMATCH[1]}
golang_version_part1=${BASH_REMATCH[2]}
golang_version_part2=${BASH_REMATCH[3]}

# the version of golang does not meet the requirement
if [ "$golang_version_part1" -lt 1 ] || ([ "$golang_version_part1" -eq 1 ] && [ "$golang_version_part2" -lt 6 ])
then
warn "Better to upgrade golang package to 1.6.0+ or use golang docker image build binary."
return
else
note "golang version: $golang_version"
fi
else
warn "Failed to parse golang version."
return
fi
}

function check_docker {
if ! docker --version &> /dev/null
then
error "Need to install docker(1.10.0+) first and run this script again."
exit 1
fi

# docker has been installed; check its version
if [[ $(docker --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
docker_version=${BASH_REMATCH[1]}
docker_version_part1=${BASH_REMATCH[2]}
docker_version_part2=${BASH_REMATCH[3]}

# the version of docker does not meet the requirement
if [ "$docker_version_part1" -lt 1 ] || ([ "$docker_version_part1" -eq 1 ] && [ "$docker_version_part2" -lt 10 ])
then
error "Need to upgrade docker package to 1.10.0+."
exit 1
else
note "docker version: $docker_version"
fi
else
error "Failed to parse docker version."
exit 1
fi
}

function check_dockercompose {
if ! docker-compose --version &> /dev/null
then
error "Need to install docker-compose(1.7.1+) by yourself first and run this script again."
exit 1
fi

# docker-compose has been installed; check its version
if [[ $(docker-compose --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
docker_compose_version=${BASH_REMATCH[1]}
docker_compose_version_part1=${BASH_REMATCH[2]}
docker_compose_version_part2=${BASH_REMATCH[3]}

# the version of docker-compose does not meet the requirement
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 6 ])
then
error "Need to upgrade docker-compose package to 1.7.1+."
exit 1
else
note "docker-compose version: $docker_compose_version"
fi
else
error "Failed to parse docker-compose version."
exit 1
fi
}
DIR="$(cd "$(dirname "$0")" && pwd)"
source $DIR/common.sh

check_golang
check_docker
make/common.sh (new file, 132 lines)
@@ -0,0 +1,132 @@
#!/bin/bash
#docker version: 17.06.0+
#docker-compose version: 1.18.0+
#golang version: 1.12.0+

set +e
set -o noglob

#
# Set Colors
#

bold=$(tput bold)
underline=$(tput sgr 0 1)
reset=$(tput sgr0)

red=$(tput setaf 1)
green=$(tput setaf 76)
white=$(tput setaf 7)
tan=$(tput setaf 202)
blue=$(tput setaf 25)

#
# Headers and Logging
#

underline() { printf "${underline}${bold}%s${reset}\n" "$@"
}
h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@"
}
h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@"
}
debug() { printf "${white}%s${reset}\n" "$@"
}
info() { printf "${white}➜ %s${reset}\n" "$@"
}
success() { printf "${green}✔ %s${reset}\n" "$@"
}
error() { printf "${red}✖ %s${reset}\n" "$@"
}
warn() { printf "${tan}➜ %s${reset}\n" "$@"
}
bold() { printf "${bold}%s${reset}\n" "$@"
}
note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@"
}

set -e

function check_golang {
if ! go version &> /dev/null
then
warn "No golang package in your environment. You should use golang docker image build binary."
return
fi

# golang has been installed; check its version
if [[ $(go version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
golang_version=${BASH_REMATCH[1]}
golang_version_part1=${BASH_REMATCH[2]}
golang_version_part2=${BASH_REMATCH[3]}

# the version of golang does not meet the requirement
if [ "$golang_version_part1" -lt 1 ] || ([ "$golang_version_part1" -eq 1 ] && [ "$golang_version_part2" -lt 12 ])
then
warn "Better to upgrade golang package to 1.12.0+ or use golang docker image build binary."
return
else
note "golang version: $golang_version"
fi
else
warn "Failed to parse golang version."
return
fi
}

function check_docker {
if ! docker --version &> /dev/null
then
error "Need to install docker(17.06.0+) first and run this script again."
exit 1
fi

# docker has been installed; check its version
if [[ $(docker --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
docker_version=${BASH_REMATCH[1]}
docker_version_part1=${BASH_REMATCH[2]}
docker_version_part2=${BASH_REMATCH[3]}

# the version of docker does not meet the requirement
if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ])
then
error "Need to upgrade docker package to 17.06.0+."
exit 1
else
note "docker version: $docker_version"
fi
else
error "Failed to parse docker version."
exit 1
fi
}

function check_dockercompose {
if ! docker-compose --version &> /dev/null
then
error "Need to install docker-compose(1.18.0+) by yourself first and run this script again."
exit 1
fi

# docker-compose has been installed; check its version
if [[ $(docker-compose --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
docker_compose_version=${BASH_REMATCH[1]}
docker_compose_version_part1=${BASH_REMATCH[2]}
docker_compose_version_part2=${BASH_REMATCH[3]}

# the version of docker-compose does not meet the requirement
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 18 ])
then
error "Need to upgrade docker-compose package to 1.18.0+."
exit 1
else
note "docker-compose version: $docker_compose_version"
fi
else
error "Failed to parse docker-compose version."
exit 1
fi
}
make/install.sh (101 changes)
@@ -1,50 +1,11 @@
#!/bin/bash

set +e
set -o noglob

#
# Set Colors
#

bold=$(tput bold)
underline=$(tput sgr 0 1)
reset=$(tput sgr0)

red=$(tput setaf 1)
green=$(tput setaf 76)
white=$(tput setaf 7)
tan=$(tput setaf 202)
blue=$(tput setaf 25)

#
# Headers and Logging
#

underline() { printf "${underline}${bold}%s${reset}\n" "$@"
}
h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@"
}
h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@"
}
debug() { printf "${white}%s${reset}\n" "$@"
}
info() { printf "${white}➜ %s${reset}\n" "$@"
}
success() { printf "${green}✔ %s${reset}\n" "$@"
}
error() { printf "${red}✖ %s${reset}\n" "$@"
}
warn() { printf "${tan}➜ %s${reset}\n" "$@"
}
bold() { printf "${bold}%s${reset}\n" "$@"
}
note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@"
}

set -e
set +o noglob

DIR="$(cd "$(dirname "$0")" && pwd)"
source $DIR/common.sh

usage=$'Please set hostname and other necessary attributes in harbor.yml first. DO NOT use localhost or 127.0.0.1 for hostname, because Harbor needs to be accessed by external clients.
Please set --with-notary if you need to enable Notary in Harbor, and set ui_url_protocol/ssl_cert/ssl_cert_key in harbor.yml because notary must run under https.
Please set --with-clair if you need to enable Clair in Harbor
@@ -86,62 +47,6 @@ then
exit 1
fi

function check_docker {
if ! docker --version &> /dev/null
then
error "Need to install docker(17.06.0+) first and run this script again."
exit 1
fi

# docker has been installed; check its version
if [[ $(docker --version) =~ (([0-9]+).([0-9]+).([0-9]+)) ]]
then
docker_version=${BASH_REMATCH[1]}
docker_version_part1=${BASH_REMATCH[2]}
docker_version_part2=${BASH_REMATCH[3]}

# the version of docker does not meet the requirement
if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ])
then
error "Need to upgrade docker package to 17.06.0+."
exit 1
else
note "docker version: $docker_version"
fi
else
error "Failed to parse docker version."
exit 1
fi
}

function check_dockercompose {
if ! docker-compose --version &> /dev/null
then
error "Need to install docker-compose(1.18.0+) by yourself first and run this script again."
exit 1
fi

# docker-compose has been installed; check its version
if [[ $(docker-compose --version) =~ (([0-9]+).([0-9]+).([0-9]+)) ]]
then
docker_compose_version=${BASH_REMATCH[1]}
docker_compose_version_part1=${BASH_REMATCH[2]}
docker_compose_version_part2=${BASH_REMATCH[3]}

# the version of docker-compose does not meet the requirement
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 18 ])
then
error "Need to upgrade docker-compose package to 1.18.0+."
exit 1
else
note "docker-compose version: $docker_compose_version"
fi
else
error "Failed to parse docker-compose version."
exit 1
fi
}

h2 "[Step $item]: checking installation environment ..."; let item+=1
check_docker
check_dockercompose
@@ -185,4 +185,4 @@ create table notification_policy (

ALTER TABLE replication_task ADD COLUMN status_revision int DEFAULT 0;
DELETE FROM project_metadata WHERE deleted = TRUE;
ALTER TABLE project_metadata DROP COLUMN deleted;
make/migrations/postgresql/0011_1.9.1_schema.up.sql (new file, 2 lines)
@@ -0,0 +1,2 @@
ALTER TABLE harbor_user ADD COLUMN password_version varchar(16) Default 'sha256';
UPDATE harbor_user SET password_version = 'sha1';
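The intent of the two statements: rows that existed before the migration are marked `sha1`, while users created afterwards get the new `sha256` default. A quick way to inspect the outcome (the container and database names below assume a stock compose deployment):

```sh
# Spot-check password_version values after the migration (names are assumptions)
docker exec harbor-db psql -U postgres -d registry \
  -c "SELECT password_version, COUNT(*) FROM harbor_user GROUP BY password_version;"
```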
make/migrations/postgresql/0015_1.10.0_schema.up.sql (new file, 47 lines)
@@ -0,0 +1,47 @@
/*Table for keeping the pluggable scanner registration*/
CREATE TABLE scanner_registration
(
id SERIAL PRIMARY KEY NOT NULL,
uuid VARCHAR(64) UNIQUE NOT NULL,
url VARCHAR(256) UNIQUE NOT NULL,
name VARCHAR(128) UNIQUE NOT NULL,
description VARCHAR(1024) NULL,
auth VARCHAR(16) NOT NULL,
access_cred VARCHAR(512) NULL,
disabled BOOLEAN NOT NULL DEFAULT FALSE,
is_default BOOLEAN NOT NULL DEFAULT FALSE,
skip_cert_verify BOOLEAN NOT NULL DEFAULT FALSE,
create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

/*Table for keeping the scan report. The report details are stored as JSON*/
CREATE TABLE scan_report
(
id SERIAL PRIMARY KEY NOT NULL,
uuid VARCHAR(64) UNIQUE NOT NULL,
digest VARCHAR(256) NOT NULL,
registration_uuid VARCHAR(64) NOT NULL,
mime_type VARCHAR(256) NOT NULL,
job_id VARCHAR(64),
track_id VARCHAR(64),
status VARCHAR(1024) NOT NULL,
status_code INTEGER DEFAULT 0,
status_rev BIGINT DEFAULT 0,
report JSON,
start_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
end_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
UNIQUE(digest, registration_uuid, mime_type)
);

/** Add table for immutable tag **/
CREATE TABLE immutable_tag_rule
(
id SERIAL PRIMARY KEY NOT NULL,
project_id int NOT NULL,
tag_filter text,
enabled boolean default true NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP
);

ALTER TABLE robot ADD COLUMN visible boolean DEFAULT true NOT NULL;
@@ -1,5 +1,5 @@
# Makefile for a harbor project
#
#
# Targets:
#
# build: build harbor photon images
@@ -109,20 +109,20 @@ _build_db:

_build_portal:
@echo "building portal container for photon..."
$(DOCKERBUILD) -f $(DOCKERFILEPATH_PORTAL)/$(DOCKERFILENAME_PORTAL) -t $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) .
$(DOCKERBUILD) --build-arg npm_registry=$(NPM_REGISTRY) -f $(DOCKERFILEPATH_PORTAL)/$(DOCKERFILENAME_PORTAL) -t $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) .
@echo "Done."

_build_core:
@echo "building core container for photon..."
@$(DOCKERBUILD) -f $(DOCKERFILEPATH_CORE)/$(DOCKERFILENAME_CORE) -t $(DOCKERIMAGENAME_CORE):$(VERSIONTAG) .
@echo "Done."

_build_jobservice:
@echo "building jobservice container for photon..."
@$(DOCKERBUILD) -f $(DOCKERFILEPATH_JOBSERVICE)/$(DOCKERFILENAME_JOBSERVICE) -t $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) .
@echo "Done."

_build_log:
@echo "building log container for photon..."
$(DOCKERBUILD) -f $(DOCKERFILEPATH_LOG)/$(DOCKERFILENAME_LOG) -t $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) .
@echo "Done."
@@ -154,7 +154,7 @@ _build_chart_server:
rm -rf $(DOCKERFILEPATH_CHART_SERVER)/binary; \
echo "Done." ; \
fi

_build_nginx:
@echo "building nginx container for photon..."
@$(DOCKERBUILD) -f $(DOCKERFILEPATH_NGINX)/$(DOCKERFILENAME_NGINX) -t $(DOCKERIMAGENAME_NGINX):$(NGINXVERSION) .
@@ -175,7 +175,7 @@ _build_notary:
rm -rf $(DOCKERFILEPATH_NOTARY)/binary; \
echo "Done."; \
fi

_build_registry:
@if [ "$(BUILDBIN)" != "true" ] ; then \
rm -rf $(DOCKERFILEPATH_REG)/binary && mkdir -p $(DOCKERFILEPATH_REG)/binary && \
@@ -187,7 +187,7 @@ _build_registry:
@chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(REGISTRYVERSION)-$(VERSIONTAG) .
@echo "Done."

_build_registryctl:
@echo "building registry controller for photon..."
@$(DOCKERBUILD) -f $(DOCKERFILEPATH_REGISTRYCTL)/$(DOCKERFILENAME_REGISTRYCTL) -t $(DOCKERIMAGENAME_REGISTRYCTL):$(VERSIONTAG) .
@rm -rf $(DOCKERFILEPATH_REG)/binary
@@ -217,7 +217,7 @@ cleanimage:
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_CORE):$(VERSIONTAG)
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_LOG):$(VERSIONTAG)

.PHONY: clean
clean: cleanimage

@ -1,20 +1,26 @@
FROM node:10.15.0 as nodeportal

COPY src/portal /portal_src
COPY ./docs/swagger.yaml /portal_src
COPY ./LICENSE /portal_src

WORKDIR /build_dir

RUN cp -r /portal_src/* /build_dir \
    && ls -la \
    && apt-get update \
ARG npm_registry=https://registry.npmjs.org
ENV NPM_CONFIG_REGISTRY=${npm_registry}

COPY src/portal/package.json /build_dir
COPY src/portal/package-lock.json /build_dir
COPY ./docs/swagger.yaml /build_dir

RUN apt-get update \
    && apt-get install -y --no-install-recommends python-yaml=3.12-1 \
    && python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' < swagger.yaml > swagger.json \
    && npm install \
    && npm install

COPY ./LICENSE /build_dir
COPY src/portal /build_dir

RUN ls -la \
    && npm run build_lib \
    && npm run link_lib \
    && npm run release
    && node --max_old_space_size=8192 'node_modules/@angular/cli/bin/ng' build --prod


FROM photon:2.0
@ -29,7 +29,7 @@ old_private_key_pem_path, old_crt_path)
def main(conf, with_notary, with_clair, with_chartmuseum):

    delfile(config_dir)
    config_dict = parse_yaml_config(conf)
    config_dict = parse_yaml_config(conf, with_notary=with_notary, with_clair=with_clair, with_chartmuseum=with_chartmuseum)
    validate(config_dict, notary_mode=with_notary)

    prepare_log_configs(config_dict)
@ -39,6 +39,10 @@ http {
    # disable any limits to avoid HTTP 413 for large image uploads
    client_max_body_size 0;

    # Add extra headers
    add_header X-Frame-Options DENY;
    add_header Content-Security-Policy "frame-ancestors 'none'";

    # customized location config files can be placed in /etc/nginx/conf.d with the prefix harbor.http. and the suffix .conf
    include /etc/nginx/conf.d/harbor.http.*.conf;

@ -45,7 +45,7 @@ http {
    ssl_certificate_key {{ssl_cert_key}};

    # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
    ssl_protocols TLSv1.1 TLSv1.2;
    ssl_protocols TLSv1.2;
    ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
@ -56,6 +56,11 @@ http {
    # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
    chunked_transfer_encoding on;

    # Add extra headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";
    add_header X-Frame-Options DENY;
    add_header Content-Security-Policy "frame-ancestors 'none'";

    # customized location config files can be placed in /etc/nginx/conf.d with the prefix harbor.https. and the suffix .conf
    include /etc/nginx/conf.d/harbor.https.*.conf;

@ -68,8 +73,7 @@ http {
    # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
    proxy_set_header X-Forwarded-Proto $scheme;

    # Add Secure flag when serving HTTPS
    proxy_cookie_path / "/; secure";
    proxy_cookie_path / "/; HttpOnly; Secure";

    proxy_buffering off;
    proxy_request_buffering off;
@ -83,7 +87,9 @@ http {

    # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
    proxy_set_header X-Forwarded-Proto $scheme;

    proxy_cookie_path / "/; Secure";

    proxy_buffering off;
    proxy_request_buffering off;
}
@ -96,6 +102,8 @@ http {

    # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
    proxy_set_header X-Forwarded-Proto $scheme;

    proxy_cookie_path / "/; Secure";

    proxy_buffering off;
    proxy_request_buffering off;
@ -109,6 +117,8 @@ http {

    # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
    proxy_set_header X-Forwarded-Proto $scheme;

    proxy_cookie_path / "/; Secure";

    proxy_buffering off;
    proxy_request_buffering off;
@ -139,6 +149,8 @@ http {
    # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
    proxy_set_header X-Forwarded-Proto $scheme;

    proxy_cookie_path / "/; Secure";

    proxy_buffering off;
    proxy_request_buffering off;
}
@ -56,7 +56,7 @@ def parse_versions():
    versions = yaml.load(f)
    return versions

def parse_yaml_config(config_file_path):
def parse_yaml_config(config_file_path, with_notary, with_clair, with_chartmuseum):
    '''
    :param configs: config_parser object
    :returns: dict of configs
@ -117,27 +117,31 @@ def parse_yaml_config(config_file_path):
    config_dict['harbor_db_sslmode'] = 'disable'
    config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_db_max_idle_conns
    config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_db_max_open_conns
    # clair db
    config_dict['clair_db_host'] = 'postgresql'
    config_dict['clair_db_port'] = 5432
    config_dict['clair_db_name'] = 'postgres'
    config_dict['clair_db_username'] = 'postgres'
    config_dict['clair_db_password'] = db_configs.get("password") or ''
    config_dict['clair_db_sslmode'] = 'disable'
    # notary signer
    config_dict['notary_signer_db_host'] = 'postgresql'
    config_dict['notary_signer_db_port'] = 5432
    config_dict['notary_signer_db_name'] = 'notarysigner'
    config_dict['notary_signer_db_username'] = 'signer'
    config_dict['notary_signer_db_password'] = 'password'
    config_dict['notary_signer_db_sslmode'] = 'disable'
    # notary server
    config_dict['notary_server_db_host'] = 'postgresql'
    config_dict['notary_server_db_port'] = 5432
    config_dict['notary_server_db_name'] = 'notaryserver'
    config_dict['notary_server_db_username'] = 'server'
    config_dict['notary_server_db_password'] = 'password'
    config_dict['notary_server_db_sslmode'] = 'disable'

    if with_clair:
        # clair db
        config_dict['clair_db_host'] = 'postgresql'
        config_dict['clair_db_port'] = 5432
        config_dict['clair_db_name'] = 'postgres'
        config_dict['clair_db_username'] = 'postgres'
        config_dict['clair_db_password'] = db_configs.get("password") or ''
        config_dict['clair_db_sslmode'] = 'disable'

    if with_notary:
        # notary signer
        config_dict['notary_signer_db_host'] = 'postgresql'
        config_dict['notary_signer_db_port'] = 5432
        config_dict['notary_signer_db_name'] = 'notarysigner'
        config_dict['notary_signer_db_username'] = 'signer'
        config_dict['notary_signer_db_password'] = 'password'
        config_dict['notary_signer_db_sslmode'] = 'disable'
        # notary server
        config_dict['notary_server_db_host'] = 'postgresql'
        config_dict['notary_server_db_port'] = 5432
        config_dict['notary_server_db_name'] = 'notaryserver'
        config_dict['notary_server_db_username'] = 'server'
        config_dict['notary_server_db_password'] = 'password'
        config_dict['notary_server_db_sslmode'] = 'disable'


    # Data path volume
@ -240,27 +244,30 @@ def parse_yaml_config(config_file_path):
    config_dict['harbor_db_sslmode'] = external_db_configs['harbor']['ssl_mode']
    config_dict['harbor_db_max_idle_conns'] = external_db_configs['harbor'].get("max_idle_conns") or default_db_max_idle_conns
    config_dict['harbor_db_max_open_conns'] = external_db_configs['harbor'].get("max_open_conns") or default_db_max_open_conns
    # clair db
    config_dict['clair_db_host'] = external_db_configs['clair']['host']
    config_dict['clair_db_port'] = external_db_configs['clair']['port']
    config_dict['clair_db_name'] = external_db_configs['clair']['db_name']
    config_dict['clair_db_username'] = external_db_configs['clair']['username']
    config_dict['clair_db_password'] = external_db_configs['clair']['password']
    config_dict['clair_db_sslmode'] = external_db_configs['clair']['ssl_mode']
    # notary signer
    config_dict['notary_signer_db_host'] = external_db_configs['notary_signer']['host']
    config_dict['notary_signer_db_port'] = external_db_configs['notary_signer']['port']
    config_dict['notary_signer_db_name'] = external_db_configs['notary_signer']['db_name']
    config_dict['notary_signer_db_username'] = external_db_configs['notary_signer']['username']
    config_dict['notary_signer_db_password'] = external_db_configs['notary_signer']['password']
    config_dict['notary_signer_db_sslmode'] = external_db_configs['notary_signer']['ssl_mode']
    # notary server
    config_dict['notary_server_db_host'] = external_db_configs['notary_server']['host']
    config_dict['notary_server_db_port'] = external_db_configs['notary_server']['port']
    config_dict['notary_server_db_name'] = external_db_configs['notary_server']['db_name']
    config_dict['notary_server_db_username'] = external_db_configs['notary_server']['username']
    config_dict['notary_server_db_password'] = external_db_configs['notary_server']['password']
    config_dict['notary_server_db_sslmode'] = external_db_configs['notary_server']['ssl_mode']

    if with_clair:
        # clair db
        config_dict['clair_db_host'] = external_db_configs['clair']['host']
        config_dict['clair_db_port'] = external_db_configs['clair']['port']
        config_dict['clair_db_name'] = external_db_configs['clair']['db_name']
        config_dict['clair_db_username'] = external_db_configs['clair']['username']
        config_dict['clair_db_password'] = external_db_configs['clair']['password']
        config_dict['clair_db_sslmode'] = external_db_configs['clair']['ssl_mode']
    if with_notary:
        # notary signer
        config_dict['notary_signer_db_host'] = external_db_configs['notary_signer']['host']
        config_dict['notary_signer_db_port'] = external_db_configs['notary_signer']['port']
        config_dict['notary_signer_db_name'] = external_db_configs['notary_signer']['db_name']
        config_dict['notary_signer_db_username'] = external_db_configs['notary_signer']['username']
        config_dict['notary_signer_db_password'] = external_db_configs['notary_signer']['password']
        config_dict['notary_signer_db_sslmode'] = external_db_configs['notary_signer']['ssl_mode']
        # notary server
        config_dict['notary_server_db_host'] = external_db_configs['notary_server']['host']
        config_dict['notary_server_db_port'] = external_db_configs['notary_server']['port']
        config_dict['notary_server_db_name'] = external_db_configs['notary_server']['db_name']
        config_dict['notary_server_db_username'] = external_db_configs['notary_server']['username']
        config_dict['notary_server_db_password'] = external_db_configs['notary_server']['password']
        config_dict['notary_server_db_sslmode'] = external_db_configs['notary_server']['ssl_mode']
    else:
        config_dict['external_database'] = False
@ -22,6 +22,13 @@ cur=$PWD
TEMP=`mktemp -d /$TMPDIR/distribution.XXXXXX`
git clone -b $VERSION https://github.com/docker/distribution.git $TEMP

# add patch 2879
echo 'add patch https://github.com/docker/distribution/pull/2879 ...'
cd $TEMP
wget https://github.com/docker/distribution/pull/2879.patch
git apply 2879.patch
cd $cur

echo 'build the registry binary based on golang:1.11...'
cp Dockerfile.binary $TEMP
docker build -f $TEMP/Dockerfile.binary -t registry-golang $TEMP
@ -143,6 +143,7 @@ var (
	{Name: common.OIDCEndpoint, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
	{Name: common.OIDCCLientID, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
	{Name: common.OIDCClientSecret, Scope: UserScope, Group: OIDCGroup, ItemType: &PasswordType{}},
	{Name: common.OIDCGroupsClaim, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
	{Name: common.OIDCScope, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
	{Name: common.OIDCVerifyCert, Scope: UserScope, Group: OIDCGroup, DefaultValue: "true", ItemType: &BoolType{}},
@ -109,6 +109,7 @@ const (
	OIDCCLientID     = "oidc_client_id"
	OIDCClientSecret = "oidc_client_secret"
	OIDCVerifyCert   = "oidc_verify_cert"
	OIDCGroupsClaim  = "oidc_groups_claim"
	OIDCScope        = "oidc_scope"

	DefaultClairEndpoint = "http://clair:6060"
@ -125,13 +126,14 @@ const (
	DefaultNotaryEndpoint             = "http://notary-server:4443"
	LDAPGroupType                     = 1
	HTTPGroupType                     = 2
	OIDCGroupType                     = 3
	LDAPGroupAdminDn                  = "ldap_group_admin_dn"
	LDAPGroupMembershipAttribute      = "ldap_group_membership_attribute"
	DefaultRegistryControllerEndpoint = "http://registryctl:8080"
	WithChartMuseum                   = "with_chartmuseum"
	ChartRepoURL                      = "chart_repository_url"
	DefaultChartRepoURL               = "http://chartmuseum:9999"
	DefaultPortalURL                  = "http://portal"
	DefaultPortalURL                  = "http://portal:8080"
	DefaultRegistryCtlURL             = "http://registryctl:8080"
	DefaultClairHealthCheckServerURL  = "http://clair:6061"
	// Use this prefix to distinguish harbor user, the prefix contains a special character($), so it cannot be registered as a harbor user.
@ -324,7 +324,12 @@ func TestResetUserPassword(t *testing.T) {
		t.Errorf("Error occurred in UpdateUserResetUuid: %v", err)
	}

	err = ResetUserPassword(models.User{UserID: currentUser.UserID, Password: "HarborTester12345", ResetUUID: uuid, Salt: currentUser.Salt})
	err = ResetUserPassword(
		models.User{
			UserID:          currentUser.UserID,
			PasswordVersion: utils.SHA256,
			ResetUUID:       uuid,
			Salt:            currentUser.Salt}, "HarborTester12345")
	if err != nil {
		t.Errorf("Error occurred in ResetUserPassword: %v", err)
	}
@ -346,7 +351,12 @@ func TestChangeUserPassword(t *testing.T) {
		t.Errorf("Error occurred when get user salt")
	}
	currentUser.Salt = query.Salt
	err = ChangeUserPassword(models.User{UserID: currentUser.UserID, Password: "NewHarborTester12345", Salt: currentUser.Salt})
	err = ChangeUserPassword(
		models.User{
			UserID:          currentUser.UserID,
			Password:        "NewHarborTester12345",
			PasswordVersion: utils.SHA256,
			Salt:            currentUser.Salt})
	if err != nil {
		t.Errorf("Error occurred in ChangeUserPassword: %v", err)
	}
@ -54,7 +54,7 @@ func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) {
		})
	}

	cnt, err := GetOrmer().InsertMulti(10, projectBlobs)
	cnt, err := GetOrmer().InsertMulti(100, projectBlobs)
	if err != nil {
		if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
			return cnt, ErrDupRows
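The batch size passed to InsertMulti is the number of rows grouped into each bulk INSERT statement, so raising it from 10 to 100 cuts database round trips roughly tenfold for large blob lists. A minimal sketch of the call, assuming (as Harbor does elsewhere) that the model is registered and a default database is configured; ProjectBlob here is a stand-in for Harbor's models.ProjectBlob:

// Sketch: batching rows with beego orm's InsertMulti.
package main

import (
	"fmt"

	"github.com/astaxie/beego/orm"
)

// ProjectBlob is a hypothetical stand-in for the real model.
type ProjectBlob struct {
	ID        int64 `orm:"pk;auto"`
	ProjectID int64
	BlobID    int64
}

func init() {
	orm.RegisterModel(new(ProjectBlob))
}

// bulkAdd inserts rows in batches of 100 per SQL statement; the larger
// batch trades a little memory for far fewer round trips.
func bulkAdd(o orm.Ormer, rows []*ProjectBlob) (int64, error) {
	return o.InsertMulti(100, rows)
}

func main() {
	// orm.NewOrm() needs a registered "default" database; wiring it up
	// (orm.RegisterDataBase) is omitted in this sketch.
	fmt.Println("see bulkAdd: InsertMulti(100, rows)")
}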
@ -121,7 +121,7 @@ func CountSizeOfProject(pid int64) (int64, error) {
	var blobs []models.Blob

	sql := `
SELECT
  DISTINCT bb.digest,
  bb.id,
  bb.content_type,
@ -132,7 +132,7 @@ JOIN artifact_blob afnb
  ON af.digest = afnb.digest_af
JOIN BLOB bb
  ON afnb.digest_blob = bb.digest
WHERE af.project_id = ?
AND bb.content_type != ?
`
	_, err := GetOrmer().Raw(sql, pid, common.ForeignLayer).QueryRows(&blobs)
@ -152,7 +152,7 @@ AND bb.content_type != ?
func RemoveUntaggedBlobs(pid int64) error {
	var blobs []models.Blob
	sql := `
SELECT
  DISTINCT bb.digest,
  bb.id,
  bb.content_type,
@ -163,7 +163,7 @@ JOIN artifact_blob afnb
  ON af.digest = afnb.digest_af
JOIN BLOB bb
  ON afnb.digest_blob = bb.digest
WHERE af.project_id = ?
`
	_, err := GetOrmer().Raw(sql, pid).QueryRows(&blobs)
	if len(blobs) == 0 {
@ -49,19 +49,20 @@ func TestAddBlobsToProject(t *testing.T) {
		OwnerID: 1,
	})
	require.Nil(t, err)
	defer DeleteProject(pid)

	for i := 0; i < 88888; i++ {
	blobsCount := 88888
	for i := 0; i < blobsCount; i++ {
		blob := &models.Blob{
			ID:     int64(100000 + i), // Use fake id to speed this test
			Digest: digest.FromString(utils.GenerateRandomString()).String(),
			Size:   100,
		}
		_, err := AddBlob(blob)
		require.Nil(t, err)
		blobs = append(blobs, blob)
	}
	cnt, err := AddBlobsToProject(pid, blobs...)
	require.Nil(t, err)
	require.Equal(t, cnt, int64(88888))
	require.Equal(t, cnt, int64(blobsCount))
}

func TestHasBlobInProject(t *testing.T) {
@ -29,10 +29,10 @@ func Register(user models.User) (int64, error) {
	now := time.Now()
	salt := utils.GenerateRandomString()
	sql := `insert into harbor_user
		(username, password, realname, email, comment, salt, sysadmin_flag, creation_time, update_time)
		values (?, ?, ?, ?, ?, ?, ?, ?, ?) RETURNING user_id`
		(username, password, password_version, realname, email, comment, salt, sysadmin_flag, creation_time, update_time)
		values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) RETURNING user_id`
	var userID int64
	err := o.Raw(sql, user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email,
	err := o.Raw(sql, user.Username, utils.Encrypt(user.Password, salt, utils.SHA256), utils.SHA256, user.Realname, user.Email,
		user.Comment, salt, user.HasAdminRole, now, now).QueryRow(&userID)
	if err != nil {
		return 0, err
@ -1,106 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dao

import (
	"github.com/astaxie/beego/orm"
	"github.com/goharbor/harbor/src/common/models"
	"strings"
	"time"
)

// AddRobot ...
func AddRobot(robot *models.Robot) (int64, error) {
	now := time.Now()
	robot.CreationTime = now
	robot.UpdateTime = now
	id, err := GetOrmer().Insert(robot)
	if err != nil {
		if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
			return 0, ErrDupRows
		}
		return 0, err
	}
	return id, nil
}

// GetRobotByID ...
func GetRobotByID(id int64) (*models.Robot, error) {
	robot := &models.Robot{
		ID: id,
	}
	if err := GetOrmer().Read(robot); err != nil {
		if err == orm.ErrNoRows {
			return nil, nil
		}
		return nil, err
	}

	return robot, nil
}

// ListRobots list robots according to the query conditions
func ListRobots(query *models.RobotQuery) ([]*models.Robot, error) {
	qs := getRobotQuerySetter(query).OrderBy("Name")
	if query != nil {
		if query.Size > 0 {
			qs = qs.Limit(query.Size)
			if query.Page > 0 {
				qs = qs.Offset((query.Page - 1) * query.Size)
			}
		}
	}
	robots := []*models.Robot{}
	_, err := qs.All(&robots)
	return robots, err
}

func getRobotQuerySetter(query *models.RobotQuery) orm.QuerySeter {
	qs := GetOrmer().QueryTable(&models.Robot{})

	if query == nil {
		return qs
	}

	if len(query.Name) > 0 {
		if query.FuzzyMatchName {
			qs = qs.Filter("Name__icontains", query.Name)
		} else {
			qs = qs.Filter("Name", query.Name)
		}
	}
	if query.ProjectID != 0 {
		qs = qs.Filter("ProjectID", query.ProjectID)
	}
	return qs
}

// CountRobot ...
func CountRobot(query *models.RobotQuery) (int64, error) {
	return getRobotQuerySetter(query).Count()
}

// UpdateRobot ...
func UpdateRobot(robot *models.Robot) error {
	robot.UpdateTime = time.Now()
	_, err := GetOrmer().Update(robot)
	return err
}

// DeleteRobot ...
func DeleteRobot(id int64) error {
	_, err := GetOrmer().QueryTable(&models.Robot{}).Filter("ID", id).Delete()
	return err
}
@ -1,159 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dao

import (
	"testing"

	"github.com/goharbor/harbor/src/common/models"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAddRobot(t *testing.T) {
	robotName := "test1"
	robot := &models.Robot{
		Name:        robotName,
		Description: "test1 description",
		ProjectID:   1,
	}

	// add
	id, err := AddRobot(robot)
	require.Nil(t, err)
	robot.ID = id

	require.Nil(t, err)
	assert.NotNil(t, id)

}

func TestGetRobot(t *testing.T) {
	robotName := "test2"
	robot := &models.Robot{
		Name:        robotName,
		Description: "test2 description",
		ProjectID:   1,
	}

	// add
	id, err := AddRobot(robot)
	require.Nil(t, err)
	robot.ID = id

	robot, err = GetRobotByID(id)
	require.Nil(t, err)
	assert.Equal(t, robotName, robot.Name)

}

func TestListRobots(t *testing.T) {
	robotName := "test3"
	robot := &models.Robot{
		Name:        robotName,
		Description: "test3 description",
		ProjectID:   1,
	}

	_, err := AddRobot(robot)
	require.Nil(t, err)

	robots, err := ListRobots(&models.RobotQuery{
		ProjectID: 1,
	})
	require.Nil(t, err)
	assert.Equal(t, 3, len(robots))

}

func TestDisableRobot(t *testing.T) {
	robotName := "test4"
	robot := &models.Robot{
		Name:        robotName,
		Description: "test4 description",
		ProjectID:   1,
	}

	// add
	id, err := AddRobot(robot)
	require.Nil(t, err)

	// Disable
	robot.Disabled = true
	err = UpdateRobot(robot)
	require.Nil(t, err)

	// Get
	robot, err = GetRobotByID(id)
	require.Nil(t, err)
	assert.Equal(t, true, robot.Disabled)

}

func TestEnableRobot(t *testing.T) {
	robotName := "test5"
	robot := &models.Robot{
		Name:        robotName,
		Description: "test5 description",
		Disabled:    true,
		ProjectID:   1,
	}

	// add
	id, err := AddRobot(robot)
	require.Nil(t, err)

	// Disable
	robot.Disabled = false
	err = UpdateRobot(robot)
	require.Nil(t, err)

	// Get
	robot, err = GetRobotByID(id)
	require.Nil(t, err)
	assert.Equal(t, false, robot.Disabled)

}

func TestDeleteRobot(t *testing.T) {
	robotName := "test6"
	robot := &models.Robot{
		Name:        robotName,
		Description: "test6 description",
		ProjectID:   1,
	}

	// add
	id, err := AddRobot(robot)
	require.Nil(t, err)

	// Disable
	err = DeleteRobot(id)
	require.Nil(t, err)

	// Get
	robot, err = GetRobotByID(id)
	assert.Nil(t, robot)

}

func TestListAllRobot(t *testing.T) {

	robots, err := ListRobots(nil)
	require.Nil(t, err)
	assert.Equal(t, 5, len(robots))

}
@ -23,7 +23,6 @@ import (

	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/common/utils"

	"github.com/goharbor/harbor/src/common/utils/log"
)

@ -32,7 +31,7 @@ func GetUser(query models.User) (*models.User, error) {

	o := GetOrmer()

	sql := `select user_id, username, password, email, realname, comment, reset_uuid, salt,
	sql := `select user_id, username, password, password_version, email, realname, comment, reset_uuid, salt,
		sysadmin_flag, creation_time, update_time
		from harbor_user u
		where deleted = false `
@ -76,9 +75,9 @@ func GetUser(query models.User) (*models.User, error) {

// LoginByDb is used for user to login with database auth mode.
func LoginByDb(auth models.AuthModel) (*models.User, error) {
	var users []models.User
	o := GetOrmer()

	var users []models.User
	n, err := o.Raw(`select * from harbor_user where (username = ? or email = ?) and deleted = false`,
		auth.Principal, auth.Principal).QueryRows(&users)
	if err != nil {
@ -90,12 +89,10 @@ func LoginByDb(auth models.AuthModel) (*models.User, error) {

	user := users[0]

	if user.Password != utils.Encrypt(auth.Password, user.Salt) {
	if !matchPassword(&user, auth.Password) {
		return nil, nil
	}

	user.Password = "" // do not return the password

	return &user, nil
}

@ -165,23 +162,34 @@ func ToggleUserAdminRole(userID int, hasAdmin bool) error {
func ChangeUserPassword(u models.User) error {
	u.UpdateTime = time.Now()
	u.Salt = utils.GenerateRandomString()
	u.Password = utils.Encrypt(u.Password, u.Salt)
	_, err := GetOrmer().Update(&u, "Password", "Salt", "UpdateTime")
	u.Password = utils.Encrypt(u.Password, u.Salt, utils.SHA256)
	var err error
	if u.PasswordVersion == utils.SHA1 {
		u.PasswordVersion = utils.SHA256
		_, err = GetOrmer().Update(&u, "Password", "PasswordVersion", "Salt", "UpdateTime")
	} else {
		_, err = GetOrmer().Update(&u, "Password", "Salt", "UpdateTime")
	}
	return err
}

// ResetUserPassword ...
func ResetUserPassword(u models.User) error {
	o := GetOrmer()
	r, err := o.Raw(`update harbor_user set password=?, reset_uuid=? where reset_uuid=?`, utils.Encrypt(u.Password, u.Salt), "", u.ResetUUID).Exec()
func ResetUserPassword(u models.User, rawPassword string) error {
	var rowsAffected int64
	var err error
	u.UpdateTime = time.Now()
	u.Password = utils.Encrypt(rawPassword, u.Salt, utils.SHA256)
	u.ResetUUID = ""
	if u.PasswordVersion == utils.SHA1 {
		u.PasswordVersion = utils.SHA256
		rowsAffected, err = GetOrmer().Update(&u, "Password", "PasswordVersion", "ResetUUID", "UpdateTime")
	} else {
		rowsAffected, err = GetOrmer().Update(&u, "Password", "ResetUUID", "UpdateTime")
	}
	if err != nil {
		return err
	}
	count, err := r.RowsAffected()
	if err != nil {
		return err
	}
	if count == 0 {
	if rowsAffected == 0 {
		return errors.New("no record be changed, reset password failed")
	}
	return nil
@ -282,3 +290,11 @@ func CleanUser(id int64) error {
	}
	return nil
}

// matchPassword returns true if the password matches
func matchPassword(u *models.User, password string) bool {
	if u.Password != utils.Encrypt(password, u.Salt, u.PasswordVersion) {
		return false
	}
	return true
}
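matchPassword verifies against whichever PBKDF2 variant the user's password_version names, while ChangeUserPassword and ResetUserPassword re-hash with SHA256 and bump the version, so legacy sha1 rows age out as passwords change. A sketch of that lazy-upgrade pattern with hypothetical types; encrypt here is a stand-in for utils.Encrypt, not Harbor's code:

// Sketch of lazy hash upgrading; user and encrypt are hypothetical.
package main

import "fmt"

func encrypt(content, salt, alg string) string {
	return alg + ":" + content + ":" + salt // stand-in for the real PBKDF2 call
}

type user struct{ password, salt, version string }

// verify checks against the stored version, exactly like matchPassword.
func verify(u *user, raw string) bool {
	return u.password == encrypt(raw, u.salt, u.version)
}

// changePassword re-hashes with sha256 and bumps the version, so the
// legacy sha1 record disappears, mirroring ChangeUserPassword.
func changePassword(u *user, newRaw string) {
	u.password = encrypt(newRaw, u.salt, "sha256")
	u.version = "sha256" // persist both fields in the real store
}

func main() {
	u := &user{password: encrypt("old", "s", "sha1"), salt: "s", version: "sha1"}
	fmt.Println(verify(u, "old")) // true: still verified with sha1
	changePassword(u, "new")
	fmt.Println(verify(u, "new"), u.version) // true sha256
}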
@ -6,6 +6,7 @@ import (
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"strings"

	commonhttp "github.com/goharbor/harbor/src/common/http"
@ -18,7 +19,9 @@ import (
var (
	// GlobalClient is an instance of the default client that can be used globally
	// Notes: the client needs to be initialized before it can be used
	GlobalClient             Client
	statusBehindErrorPattern = "mismatch job status for stopping job: .*, job status (.*) is behind Running"
	statusBehindErrorReg     = regexp.MustCompile(statusBehindErrorPattern)
)

// Client wraps interface to access jobservice.
@ -30,6 +33,21 @@ type Client interface {
	// TODO Redirect joblog when we see there's memory issue.
}

// StatusBehindError represents the error got when trying to stop a success/failed job
type StatusBehindError struct {
	status string
}

// Error returns the detail message about the error
func (s *StatusBehindError) Error() string {
	return "status behind error"
}

// Status returns the current status of the job
func (s *StatusBehindError) Status() string {
	return s.status
}

// DefaultClient is the default implementation of Client interface
type DefaultClient struct {
	endpoint string
@ -156,5 +174,25 @@ func (d *DefaultClient) PostAction(uuid, action string) error {
	}{
		Action: action,
	}
	return d.client.Post(url, req)
	if err := d.client.Post(url, req); err != nil {
		status, flag := isStatusBehindError(err)
		if flag {
			return &StatusBehindError{
				status: status,
			}
		}
		return err
	}
	return nil
}

func isStatusBehindError(err error) (string, bool) {
	if err == nil {
		return "", false
	}
	strs := statusBehindErrorReg.FindStringSubmatch(err.Error())
	if len(strs) != 2 {
		return "", false
	}
	return strs[1], true
}
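FindStringSubmatch returns the full match followed by one entry per capturing group, so the len(strs) != 2 check above enforces exactly one captured group. A standalone illustration:

// Demo of submatch extraction with the pattern used above.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile("mismatch job status for stopping job: .*, job status (.*) is behind Running")
	m := re.FindStringSubmatch("mismatch job status for stopping job: abc123, job status Error is behind Running")
	// m[0] is the full match, m[1] the captured status.
	fmt.Println(len(m), m[1]) // 2 Error
}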
@ -1,11 +1,13 @@
package job

import (
	"errors"
	"os"
	"testing"

	"github.com/goharbor/harbor/src/common/job/models"
	"github.com/goharbor/harbor/src/common/job/test"
	"github.com/stretchr/testify/assert"
	"os"
	"testing"
)

var (
@ -62,3 +64,20 @@ func TestPostAction(t *testing.T) {
	err2 := testClient.PostAction(ID, "stop")
	assert.Nil(err2)
}

func TestIsStatusBehindError(t *testing.T) {
	// nil error
	status, flag := isStatusBehindError(nil)
	assert.False(t, flag)

	// not status behind error
	err := errors.New("not status behind error")
	status, flag = isStatusBehindError(err)
	assert.False(t, flag)

	// status behind error
	err = errors.New("mismatch job status for stopping job: 9feedf9933jffs, job status Error is behind Running")
	status, flag = isStatusBehindError(err)
	assert.True(t, flag)
	assert.Equal(t, "Error", status)
}
@ -35,7 +35,6 @@ func init() {
		new(UserGroup),
		new(AdminJob),
		new(JobLog),
		new(Robot),
		new(OIDCUser),
		new(NotificationPolicy),
		new(NotificationJob),
@ -82,6 +82,7 @@ type OIDCSetting struct {
	VerifyCert   bool     `json:"verify_cert"`
	ClientID     string   `json:"client_id"`
	ClientSecret string   `json:"client_secret"`
	GroupsClaim  string   `json:"groups_claim"`
	RedirectURL  string   `json:"redirect_url"`
	Scope        []string `json:"scope"`
}
@ -54,11 +54,11 @@ type RepositoryQuery struct {
// TagResp holds the information of one image tag
type TagResp struct {
	TagDetail
	Signature    *model.Target    `json:"signature"`
	ScanOverview *ImgScanOverview `json:"scan_overview,omitempty"`
	Labels       []*Label         `json:"labels"`
	PushTime     time.Time        `json:"push_time"`
	PullTime     time.Time        `json:"pull_time"`
	Signature    *model.Target          `json:"signature"`
	ScanOverview map[string]interface{} `json:"scan_overview,omitempty"`
	Labels       []*Label               `json:"labels"`
	PushTime     time.Time              `json:"push_time"`
	PullTime     time.Time              `json:"pull_time"`
}

// TagDetail ...
@ -23,14 +23,15 @@ const UserTable = "harbor_user"

// User holds the details of a user.
type User struct {
	UserID   int    `orm:"pk;auto;column(user_id)" json:"user_id"`
	Username string `orm:"column(username)" json:"username"`
	Email    string `orm:"column(email)" json:"email"`
	Password string `orm:"column(password)" json:"password"`
	Realname string `orm:"column(realname)" json:"realname"`
	Comment  string `orm:"column(comment)" json:"comment"`
	Deleted  bool   `orm:"column(deleted)" json:"deleted"`
	Rolename string `orm:"-" json:"role_name"`
	UserID          int    `orm:"pk;auto;column(user_id)" json:"user_id"`
	Username        string `orm:"column(username)" json:"username"`
	Email           string `orm:"column(email)" json:"email"`
	Password        string `orm:"column(password)" json:"password"`
	PasswordVersion string `orm:"column(password_version)" json:"password_version"`
	Realname        string `orm:"column(realname)" json:"realname"`
	Comment         string `orm:"column(comment)" json:"comment"`
	Deleted         bool   `orm:"column(deleted)" json:"deleted"`
	Rolename        string `orm:"-" json:"role_name"`
	// if this field is named as "RoleID", beego orm can not map role_id
	// to it.
	Role int `orm:"-" json:"role_id"`
@ -49,13 +49,15 @@ const (
	ResourceReplicationTask            = Resource("replication-task")
	ResourceRepository                 = Resource("repository")
	ResourceTagRetention               = Resource("tag-retention")
	ResourceImmutableTag               = Resource("immutable-tag")
	ResourceRepositoryLabel            = Resource("repository-label")
	ResourceRepositoryTag              = Resource("repository-tag")
	ResourceRepositoryTagLabel         = Resource("repository-tag-label")
	ResourceRepositoryTagManifest      = Resource("repository-tag-manifest")
	ResourceRepositoryTagScanJob       = Resource("repository-tag-scan-job")
	ResourceRepositoryTagVulnerability = Resource("repository-tag-vulnerability")
	ResourceRepositoryTagScanJob       = Resource("repository-tag-scan-job")      // TODO: remove
	ResourceRepositoryTagVulnerability = Resource("repository-tag-vulnerability") // TODO: remove
	ResourceRobot                      = Resource("robot")
	ResourceNotificationPolicy         = Resource("notification-policy")
	ResourceScan                       = Resource("scan")
	ResourceSelf                       = Resource("") // subresource for self
)
@ -95,6 +95,11 @@ var (
	{Resource: rbac.ResourceTagRetention, Action: rbac.ActionList},
	{Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate},

	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionUpdate},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionDelete},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionList},

	{Resource: rbac.ResourceLabel, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceLabel, Action: rbac.ActionRead},
	{Resource: rbac.ResourceLabel, Action: rbac.ActionUpdate},
@ -157,6 +162,9 @@ var (
	{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete},
	{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},
	{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead},

	{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
}
)
@ -68,6 +68,11 @@ var (
	{Resource: rbac.ResourceTagRetention, Action: rbac.ActionList},
	{Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate},

	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionUpdate},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionDelete},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionList},

	{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete},
	{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList},
@ -114,6 +119,9 @@ var (
	{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete},
	{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},
	{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead},

	{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
},

"master": {
@ -153,6 +161,11 @@ var (
	{Resource: rbac.ResourceTagRetention, Action: rbac.ActionList},
	{Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate},

	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionUpdate},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionDelete},
	{Resource: rbac.ResourceImmutableTag, Action: rbac.ActionList},

	{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete},
	{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList},
@ -191,6 +204,9 @@ var (
	{Resource: rbac.ResourceRobot, Action: rbac.ActionList},

	{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},

	{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
},

"developer": {
@ -241,6 +257,9 @@ var (

	{Resource: rbac.ResourceRobot, Action: rbac.ActionRead},
	{Resource: rbac.ResourceRobot, Action: rbac.ActionList},

	{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
	{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
},

"guest": {
|
@ -110,6 +110,10 @@ func (p *Policy) GetEffect() string {
|
||||
return eft.String()
|
||||
}
|
||||
|
||||
func (p *Policy) String() string {
|
||||
return p.Resource.String() + ":" + p.Action.String() + ":" + p.GetEffect()
|
||||
}
|
||||
|
||||
// Role the interface of rbac role
|
||||
type Role interface {
|
||||
// GetRoleName returns the role identity, if empty string role's policies will be ignore
|
||||
|
@ -18,17 +18,18 @@ import (
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/goharbor/harbor/src/common/rbac"
|
||||
"github.com/goharbor/harbor/src/core/promgr"
|
||||
"github.com/goharbor/harbor/src/pkg/robot/model"
|
||||
)
|
||||
|
||||
// SecurityContext implements security.Context interface based on database
|
||||
type SecurityContext struct {
|
||||
robot *models.Robot
|
||||
robot *model.Robot
|
||||
pm promgr.ProjectManager
|
||||
policy []*rbac.Policy
|
||||
}
|
||||
|
||||
// NewSecurityContext ...
|
||||
func NewSecurityContext(robot *models.Robot, pm promgr.ProjectManager, policy []*rbac.Policy) *SecurityContext {
|
||||
func NewSecurityContext(robot *model.Robot, pm promgr.ProjectManager, policy []*rbac.Policy) *SecurityContext {
|
||||
return &SecurityContext{
|
||||
robot: robot,
|
||||
pm: pm,
|
||||
|
@ -26,6 +26,7 @@ import (
	"github.com/goharbor/harbor/src/common/utils/log"
	"github.com/goharbor/harbor/src/core/promgr"
	"github.com/goharbor/harbor/src/core/promgr/pmsdriver/local"
	"github.com/goharbor/harbor/src/pkg/robot/model"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@ -96,6 +97,7 @@ func TestIsAuthenticated(t *testing.T) {
	assert.False(t, ctx.IsAuthenticated())

	// authenticated
	ctx = NewSecurityContext(&models.Robot{
	ctx = NewSecurityContext(&model.Robot{
		Name:     "test",
		Disabled: false,
	}, nil, nil)
@ -109,7 +110,7 @@ func TestGetUsername(t *testing.T) {
	assert.Equal(t, "", ctx.GetUsername())

	// authenticated
	ctx = NewSecurityContext(&models.Robot{
	ctx = NewSecurityContext(&model.Robot{
		Name:     "test",
		Disabled: false,
	}, nil, nil)
@ -122,7 +123,7 @@ func TestIsSysAdmin(t *testing.T) {
	assert.False(t, ctx.IsSysAdmin())

	// authenticated, non admin
	ctx = NewSecurityContext(&models.Robot{
	ctx = NewSecurityContext(&model.Robot{
		Name:     "test",
		Disabled: false,
	}, nil, nil)
@ -141,7 +142,7 @@ func TestHasPullPerm(t *testing.T) {
			Action:   rbac.ActionPull,
		},
	}
	robot := &models.Robot{
	robot := &model.Robot{
		Name:        "test_robot_1",
		Description: "desc",
	}
@ -158,7 +159,7 @@ func TestHasPushPerm(t *testing.T) {
			Action:   rbac.ActionPush,
		},
	}
	robot := &models.Robot{
	robot := &model.Robot{
		Name:        "test_robot_2",
		Description: "desc",
	}
@ -179,7 +180,7 @@ func TestHasPushPullPerm(t *testing.T) {
			Action:   rbac.ActionPull,
		},
	}
	robot := &models.Robot{
	robot := &model.Robot{
		Name:        "test_robot_3",
		Description: "desc",
	}
|
@ -9,7 +9,7 @@ import (
|
||||
type robot struct {
|
||||
username string
|
||||
namespace rbac.Namespace
|
||||
policy []*rbac.Policy
|
||||
policies []*rbac.Policy
|
||||
}
|
||||
|
||||
// GetUserName get the robot name.
|
||||
@ -23,7 +23,7 @@ func (r *robot) GetPolicies() []*rbac.Policy {
|
||||
if r.namespace.IsPublic() {
|
||||
policies = append(policies, project.PoliciesForPublicProject(r.namespace)...)
|
||||
}
|
||||
policies = append(policies, r.policy...)
|
||||
policies = append(policies, r.policies...)
|
||||
return policies
|
||||
}
|
||||
|
||||
@ -33,10 +33,30 @@ func (r *robot) GetRoles() []rbac.Role {
|
||||
}
|
||||
|
||||
// NewRobot ...
|
||||
func NewRobot(username string, namespace rbac.Namespace, policy []*rbac.Policy) rbac.User {
|
||||
func NewRobot(username string, namespace rbac.Namespace, policies []*rbac.Policy) rbac.User {
|
||||
return &robot{
|
||||
username: username,
|
||||
namespace: namespace,
|
||||
policy: policy,
|
||||
policies: filterPolicies(namespace, policies),
|
||||
}
|
||||
}
|
||||
|
||||
func filterPolicies(namespace rbac.Namespace, policies []*rbac.Policy) []*rbac.Policy {
|
||||
var results []*rbac.Policy
|
||||
if len(policies) == 0 {
|
||||
return results
|
||||
}
|
||||
|
||||
mp := map[string]bool{}
|
||||
for _, policy := range project.GetAllPolicies(namespace) {
|
||||
mp[policy.String()] = true
|
||||
}
|
||||
|
||||
for _, policy := range policies {
|
||||
if mp[policy.String()] {
|
||||
results = append(results, policy)
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
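filterPolicies keeps only the robot's stored policies that the project namespace can actually grant, using Policy.String() as a set key. The same intersection idea with plain strings:

// Standalone sketch of the set intersection behind filterPolicies,
// using strings in place of Harbor's rbac.Policy type.
package main

import "fmt"

func filter(allowed, requested []string) []string {
	set := map[string]bool{}
	for _, a := range allowed {
		set[a] = true // index every policy the namespace can grant
	}
	var out []string
	for _, r := range requested {
		if set[r] { // keep only requested policies that are grantable
			out = append(out, r)
		}
	}
	return out
}

func main() {
	allowed := []string{"/project/1/repository:pull", "/project/1/repository:push"}
	requested := []string{"/project/1/repository:pull", "/project/library/repository:pull"}
	fmt.Println(filter(allowed, requested)) // [/project/1/repository:pull]
}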
@ -33,10 +33,21 @@ func TestGetPolicies(t *testing.T) {
	robot := robot{
		username:  "test",
		namespace: rbac.NewProjectNamespace(1, false),
		policy:    policies,
		policies:  policies,
	}

	assert.Equal(t, robot.GetUserName(), "test")
	assert.NotNil(t, robot.GetPolicies())
	assert.Nil(t, robot.GetRoles())
}

func TestNewRobot(t *testing.T) {
	policies := []*rbac.Policy{
		{Resource: "/project/1/repository", Action: "pull"},
		{Resource: "/project/library/repository", Action: "pull"},
		{Resource: "/project/library/repository", Action: "push"},
	}

	robot := NewRobot("test", rbac.NewProjectNamespace(1, false), policies)
	assert.Len(t, robot.GetPolicies(), 1)
}
@ -19,25 +19,37 @@ import (
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/base64"
	"errors"
	"fmt"
	"hash"
	"io"
	"strings"

	"golang.org/x/crypto/pbkdf2"
)

// Encrypt encrypts the content with salt
func Encrypt(content string, salt string) string {
	return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, sha1.New))
}

const (
	// EncryptHeaderV1 ...
	EncryptHeaderV1 = "<enc-v1>"
	// SHA1 is the name of sha1 hash alg
	SHA1 = "sha1"
	// SHA256 is the name of sha256 hash alg
	SHA256 = "sha256"
)

// HashAlg used to get correct alg for hash
var HashAlg = map[string]func() hash.Hash{
	SHA1:   sha1.New,
	SHA256: sha256.New,
}

// Encrypt encrypts the content with salt
func Encrypt(content string, salt string, encrptAlg string) string {
	return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, HashAlg[encrptAlg]))
}

// ReversibleEncrypt encrypts the str with aes/base64
func ReversibleEncrypt(str, key string) (string, error) {
	keyBytes := []byte(key)
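The reworked Encrypt still derives a 16-byte PBKDF2 key over 4096 iterations; only the digest function is now selected by name. A standalone check whose expected outputs are the two digests asserted in the updated TestEncrypt further down:

// Demo of the version-selectable PBKDF2 derivation.
package main

import (
	"crypto/sha1"
	"crypto/sha256"
	"fmt"
	"hash"

	"golang.org/x/crypto/pbkdf2"
)

var hashAlg = map[string]func() hash.Hash{"sha1": sha1.New, "sha256": sha256.New}

// encrypt mirrors the new signature: same salt and iteration count,
// digest function chosen by name.
func encrypt(content, salt, alg string) string {
	return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, hashAlg[alg]))
}

func main() {
	fmt.Println(encrypt("content", "salt", "sha1"))   // dc79e76c88415c97eb089d9cc80b4ab0
	fmt.Println(encrypt("content", "salt", "sha256")) // 83d3d6f3e7cacb040423adf7ced63d21
}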
@ -444,7 +444,7 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st

func createNestedGroupFilter(userDN string) string {
	filter := ""
	filter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + userDN + "))"
	filter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + goldap.EscapeFilter(userDN) + "))"
	return filter
}
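Escaping the user DN closes an LDAP filter-injection hole: metacharacters such as '*' and ')' in a DN can no longer rewrite the matching-rule-in-chain filter. A small demo; the go-ldap import path below is an assumption, since the diff only shows the goldap alias:

// Demo of DN escaping before filter interpolation.
package main

import (
	"fmt"

	goldap "gopkg.in/ldap.v2" // assumed import path for the goldap alias
)

func nestedGroupFilter(userDN string) string {
	return "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + goldap.EscapeFilter(userDN) + "))"
}

func main() {
	// A DN containing filter metacharacters stays inert inside the filter.
	fmt.Println(nestedGroupFilter("cn=a*)(objectClass=*),dc=example,dc=com"))
}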
@ -207,6 +207,45 @@ func RefreshToken(ctx context.Context, token *Token) (*Token, error) {
	return &Token{Token: *t, IDToken: it}, nil
}

// GroupsFromToken returns the list of group names in the token; the claim holding the group list is set in OIDCSetting.
// It's designed not to return errors; in unexpected situations it logs and returns an empty list.
func GroupsFromToken(token *gooidc.IDToken) []string {
	if token == nil {
		log.Warning("Return empty list for nil token")
		return []string{}
	}
	setting := provider.setting.Load().(models.OIDCSetting)
	if len(setting.GroupsClaim) == 0 {
		log.Warning("Group claim is not set in OIDC setting, returning empty group list.")
		return []string{}
	}
	var c map[string]interface{}
	err := token.Claims(&c)
	if err != nil {
		log.Warningf("Failed to get claims map, error: %v", err)
		return []string{}
	}
	return groupsFromClaim(c, setting.GroupsClaim)
}

func groupsFromClaim(claimMap map[string]interface{}, k string) []string {
	var res []string
	g, ok := claimMap[k].([]interface{})
	if !ok {
		log.Warningf("Unable to get groups from claims, claims: %+v, groups claim key: %s", claimMap, k)
		return res
	}
	for _, e := range g {
		s, ok := e.(string)
		if !ok {
			log.Warningf("Element in group list is not string: %v, list: %v", e, g)
			continue
		}
		res = append(res, s)
	}
	return res
}

// Conn wraps connection info of an OIDC endpoint
type Conn struct {
	URL string `json:"url"`
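groupsFromClaim deliberately swallows shape problems: a missing key or non-array claim yields an empty list, and non-string elements are skipped. A runnable distillation of that rule:

// Demo of the claim-shape rule: only string elements of an array claim survive.
package main

import "fmt"

func groupsFromClaim(claims map[string]interface{}, k string) []string {
	var res []string
	g, ok := claims[k].([]interface{})
	if !ok {
		return res // missing key or non-array claim: empty result, no error
	}
	for _, e := range g {
		if s, ok := e.(string); ok {
			res = append(res, s)
		}
	}
	return res
}

func main() {
	claims := map[string]interface{}{"groups": []interface{}{"dev", "ops", 42}}
	fmt.Println(groupsFromClaim(claims, "groups")) // [dev ops]; 42 is skipped
	fmt.Println(groupsFromClaim(claims, "email"))  // []
}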
@ -15,6 +15,7 @@
package oidc

import (
	gooidc "github.com/coreos/go-oidc"
	"github.com/goharbor/harbor/src/common"
	config2 "github.com/goharbor/harbor/src/common/config"
	"github.com/goharbor/harbor/src/common/models"
@ -110,3 +111,50 @@ func TestTestEndpoint(t *testing.T) {
	assert.Nil(t, TestEndpoint(c1))
	assert.NotNil(t, TestEndpoint(c2))
}

func TestGroupsFromToken(t *testing.T) {
	res := GroupsFromToken(nil)
	assert.Equal(t, []string{}, res)
	res = GroupsFromToken(&gooidc.IDToken{})
	assert.Equal(t, []string{}, res)
}

func TestGroupsFromClaim(t *testing.T) {
	in := map[string]interface{}{
		"user":     "user1",
		"groups":   []interface{}{"group1", "group2"},
		"groups_2": []interface{}{"group1", "group2", 2},
	}

	m := []struct {
		input  map[string]interface{}
		key    string
		expect []string
	}{
		{
			in,
			"user",
			nil,
		},
		{
			in,
			"prg",
			nil,
		},
		{
			in,
			"groups",
			[]string{"group1", "group2"},
		},
		{
			in,
			"groups_2",
			[]string{"group1", "group2"},
		},
	}

	for _, tc := range m {
		r := groupsFromClaim(tc.input, tc.key)
		assert.Equal(t, tc.expect, r)
	}
}
src/common/utils/registry/auth/apikey.go (new file, 45 lines)
@ -0,0 +1,45 @@
package auth

import (
	"fmt"
	"net/http"

	"github.com/goharbor/harbor/src/common/http/modifier"
)

type apiKeyType = string

const (
	// APIKeyInHeader sets auth content in header
	APIKeyInHeader apiKeyType = "header"
	// APIKeyInQuery sets auth content in url query
	APIKeyInQuery apiKeyType = "query"
)

type apiKeyAuthorizer struct {
	key, value, in apiKeyType
}

// NewAPIKeyAuthorizer returns an apikey authorizer
func NewAPIKeyAuthorizer(key, value, in apiKeyType) modifier.Modifier {
	return &apiKeyAuthorizer{
		key:   key,
		value: value,
		in:    in,
	}
}

// Modify implements modifier.Modifier
func (a *apiKeyAuthorizer) Modify(r *http.Request) error {
	switch a.in {
	case APIKeyInHeader:
		r.Header.Set(a.key, a.value)
		return nil
	case APIKeyInQuery:
		query := r.URL.Query()
		query.Add(a.key, a.value)
		r.URL.RawQuery = query.Encode()
		return nil
	}
	return fmt.Errorf("set api key in %s is invalid", a.in)
}
src/common/utils/registry/auth/apikey_test.go (new file, 50 lines)
@ -0,0 +1,50 @@
package auth

import (
	"net/http"
	"testing"

	"github.com/goharbor/harbor/src/common/http/modifier"
	"github.com/stretchr/testify/assert"
)

func TestAPIKeyAuthorizer(t *testing.T) {
	type suite struct {
		key   string
		value string
		in    string
	}

	var (
		s          suite
		authorizer modifier.Modifier
		request    *http.Request
		err        error
	)

	// set in header
	s = suite{key: "Authorization", value: "Basic abc", in: "header"}
	authorizer = NewAPIKeyAuthorizer(s.key, s.value, s.in)
	request, err = http.NewRequest(http.MethodGet, "http://example.com", nil)
	assert.Nil(t, err)
	err = authorizer.Modify(request)
	assert.Nil(t, err)
	assert.Equal(t, s.value, request.Header.Get(s.key))

	// set in query
	s = suite{key: "private_token", value: "abc", in: "query"}
	authorizer = NewAPIKeyAuthorizer(s.key, s.value, s.in)
	request, err = http.NewRequest(http.MethodGet, "http://example.com", nil)
	assert.Nil(t, err)
	err = authorizer.Modify(request)
	assert.Nil(t, err)
	assert.Equal(t, s.value, request.URL.Query().Get(s.key))

	// set in invalid location
	s = suite{key: "", value: "", in: "invalid"}
	authorizer = NewAPIKeyAuthorizer(s.key, s.value, s.in)
	request, err = http.NewRequest(http.MethodGet, "http://example.com", nil)
	assert.Nil(t, err)
	err = authorizer.Modify(request)
	assert.NotNil(t, err)
}
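A hedged usage sketch for the new authorizer: applied by hand to an outgoing request here, though inside Harbor it would run as one modifier in a request-modifier chain. The GitLab-style private_token query parameter is only an example value:

// Sketch: attaching an API key to a request with the new authorizer.
package main

import (
	"fmt"
	"net/http"

	"github.com/goharbor/harbor/src/common/utils/registry/auth"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://gitlab.example.com/api/v4/projects", nil)
	if err != nil {
		panic(err)
	}
	// Place the token in the query string, as a GitLab-style API might expect.
	authorizer := auth.NewAPIKeyAuthorizer("private_token", "abc123", auth.APIKeyInQuery)
	if err := authorizer.Modify(req); err != nil {
		panic(err)
	}
	fmt.Println(req.URL.String()) // ...?private_token=abc123
}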
@ -89,7 +89,6 @@ func updateUserInitialPassword(userID int, password string) error {
		if err != nil {
			return fmt.Errorf("Failed to update user encrypted password, userID: %d, err: %v", userID, err)
		}
	} else {
	}
	return nil
}
@ -17,6 +17,7 @@ package utils
import (
	"encoding/base64"
	"net/http/httptest"
	"reflect"
	"strconv"
	"strings"
	"testing"
@ -91,12 +92,21 @@ func TestParseRepository(t *testing.T) {
}

func TestEncrypt(t *testing.T) {
	content := "content"
	salt := "salt"
	result := Encrypt(content, salt)
	tests := map[string]struct {
		content string
		salt    string
		alg     string
		want    string
	}{
		"sha1 test":   {content: "content", salt: "salt", alg: SHA1, want: "dc79e76c88415c97eb089d9cc80b4ab0"},
		"sha256 test": {content: "content", salt: "salt", alg: SHA256, want: "83d3d6f3e7cacb040423adf7ced63d21"},
	}

	if result != "dc79e76c88415c97eb089d9cc80b4ab0" {
		t.Errorf("unexpected result: %s != %s", result, "dc79e76c88415c97eb089d9cc80b4ab0")
	for name, tc := range tests {
		got := Encrypt(tc.content, tc.salt, tc.alg)
		if !reflect.DeepEqual(tc.want, got) {
			t.Errorf("%s: expected: %v, got: %v", name, tc.want, got)
		}
	}
}
@ -62,9 +62,12 @@ func (aj *AJAPI) updateSchedule(ajr models.AdminJobReq) {

	// stop the scheduled job and remove it.
	if err = utils_core.GetJobServiceClient().PostAction(jobs[0].UUID, common_job.JobActionStop); err != nil {
		if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound {
			aj.SendInternalServerError(err)
			return
		_, ok := err.(*common_job.StatusBehindError)
		if !ok {
			if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound {
				aj.SendInternalServerError(err)
				return
			}
		}
	}
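The same tolerance can be factored into a helper: a *StatusBehindError from PostAction means the job already reached a terminal state, so a stop request can be treated as done. A sketch against a minimal local interface; the module path is Harbor's, and the fake client exists only to force the error path:

// Sketch: tolerating StatusBehindError when stopping a job.
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/job"
)

type actionPoster interface {
	PostAction(uuid, action string) error
}

// stopTolerant mirrors updateSchedule: a *StatusBehindError means the job
// already finished, so the stop is treated as a success.
func stopTolerant(c actionPoster, uuid string) error {
	err := c.PostAction(uuid, "stop")
	if sbe, ok := err.(*job.StatusBehindError); ok {
		fmt.Printf("job already finished (status %q); ignoring stop failure\n", sbe.Status())
		return nil
	}
	return err
}

type fake struct{}

func (fake) PostAction(uuid, action string) error { return &job.StatusBehindError{} }

func main() {
	fmt.Println(stopTolerant(fake{}, "uuid-1")) // <nil>
}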
@ -180,6 +180,7 @@ func runCodeCheckingCases(t *testing.T, cases ...*codeCheckingCase) {
		if c.postFunc != nil {
			if err := c.postFunc(resp); err != nil {
				t.Logf("error in running post function: %v", err)
				t.Error(err)
			}
		}
	}
@ -18,6 +18,7 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"net/http"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
@ -37,6 +38,7 @@ import (
|
||||
|
||||
const (
|
||||
yamlFileContentType = "application/x-yaml"
|
||||
userSessionKey = "user"
|
||||
)
|
||||
|
||||
// the managers/controllers used globally
|
||||
@ -168,6 +170,12 @@ func (b *BaseController) WriteYamlData(object interface{}) {
|
||||
_, _ = w.Write(yData)
|
||||
}
|
||||
|
||||
// PopulateUserSession generates a new session ID and fill the user model in parm to the session
|
||||
func (b *BaseController) PopulateUserSession(u models.User) {
|
||||
b.SessionRegenerateID()
|
||||
b.SetSession(userSessionKey, u)
|
||||
}
|
||||
|
||||
// Init related objects/configurations for the API controllers
|
||||
func Init() error {
|
||||
registerHealthCheckers()
|
||||
|
@ -177,7 +177,8 @@ func init() {
    beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/test", &NotificationPolicyAPI{}, "post:Test")
    beego.Router("/api/projects/:pid([0-9]+)/webhook/lasttrigger", &NotificationPolicyAPI{}, "get:ListGroupByEventType")
    beego.Router("/api/projects/:pid([0-9]+)/webhook/jobs/", &NotificationJobAPI{}, "get:List")

    beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules", &ImmutableTagRuleAPI{}, "get:List;post:Post")
    beego.Router("/api/projects/:pid([0-9]+)/immutabletagrules/:id([0-9]+)", &ImmutableTagRuleAPI{})
    // Charts are controlled under projects
    chartRepositoryAPIType := &ChartRepositoryAPI{}
    beego.Router("/api/chartrepo/health", chartRepositoryAPIType, "get:GetHealthStatus")
@ -206,6 +207,22 @@ func init() {
    beego.Router("/api/internal/switchquota", &InternalAPI{}, "put:SwitchQuota")
    beego.Router("/api/internal/syncquota", &InternalAPI{}, "post:SyncQuota")

    // Add routes for plugin scanner management
    scannerAPI := &ScannerAPI{}
    beego.Router("/api/scanners", scannerAPI, "post:Create;get:List")
    beego.Router("/api/scanners/:uuid", scannerAPI, "get:Get;delete:Delete;put:Update;patch:SetAsDefault")
    beego.Router("/api/scanners/:uuid/metadata", scannerAPI, "get:Metadata")
    beego.Router("/api/scanners/ping", scannerAPI, "post:Ping")

    // Add routes for project level scanner
    proScannerAPI := &ProjectScannerAPI{}
    beego.Router("/api/projects/:pid([0-9]+)/scanner", proScannerAPI, "get:GetProjectScanner;put:SetProjectScanner")

    // Add routes for scan
    scanAPI := &ScanAPI{}
    beego.Router("/api/repositories/*/tags/:tag/scan", scanAPI, "post:Scan;get:Report")
    beego.Router("/api/repositories/*/tags/:tag/scan/:uuid/log", scanAPI, "get:Log")

    // syncRegistry
    if err := SyncRegistry(config.GlobalProjectMgr); err != nil {
        log.Fatalf("failed to sync repositories from registry: %v", err)
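The third argument to beego.Router is a mapping string of the form "method:Function;method:Function", binding HTTP verbs to controller methods. A minimal standalone illustration of the same mechanism, with a hypothetical PingAPI controller (not part of this diff):

package main

import "github.com/astaxie/beego"

// PingAPI exists only to illustrate beego's "method:Function" mapping syntax.
type PingAPI struct {
    beego.Controller
}

// Ping handles POST /ping.
func (p *PingAPI) Ping() {
    p.Data["json"] = map[string]string{"status": "ok"}
    p.ServeJSON()
}

func main() {
    // "post:Ping" maps the POST verb to the Ping method, exactly like
    // "post:Create;get:List" maps two verbs to two methods above.
    beego.Router("/ping", &PingAPI{}, "post:Ping")
    beego.Run()
}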
135
src/core/api/immutabletagrule.go
@ -0,0 +1,135 @@
package api

import (
    "errors"
    "fmt"
    "net/http"
    "strconv"
    "strings"

    "github.com/goharbor/harbor/src/common/rbac"
    "github.com/goharbor/harbor/src/pkg/immutabletag"
    "github.com/goharbor/harbor/src/pkg/immutabletag/model"
)

// ImmutableTagRuleAPI ...
type ImmutableTagRuleAPI struct {
    BaseController
    ctr       immutabletag.APIController
    projectID int64
    ID        int64
}

// Prepare validates the user and projectID
func (itr *ImmutableTagRuleAPI) Prepare() {
    itr.BaseController.Prepare()
    if !itr.SecurityCtx.IsAuthenticated() {
        itr.SendUnAuthorizedError(errors.New("Unauthorized"))
        return
    }

    pid, err := itr.GetInt64FromPath(":pid")
    if err != nil || pid <= 0 {
        text := "invalid project ID: "
        if err != nil {
            text += err.Error()
        } else {
            text += fmt.Sprintf("%d", pid)
        }
        itr.SendBadRequestError(errors.New(text))
        return
    }
    itr.projectID = pid

    ruleID, err := itr.GetInt64FromPath(":id")
    if err == nil || ruleID > 0 {
        itr.ID = ruleID
    }

    itr.ctr = immutabletag.ImmuCtr

    if strings.EqualFold(itr.Ctx.Request.Method, "get") {
        if !itr.requireAccess(rbac.ActionList) {
            return
        }
    } else if strings.EqualFold(itr.Ctx.Request.Method, "put") {
        if !itr.requireAccess(rbac.ActionUpdate) {
            return
        }
    } else if strings.EqualFold(itr.Ctx.Request.Method, "post") {
        if !itr.requireAccess(rbac.ActionCreate) {
            return
        }

    } else if strings.EqualFold(itr.Ctx.Request.Method, "delete") {
        if !itr.requireAccess(rbac.ActionDelete) {
            return
        }
    }
}

func (itr *ImmutableTagRuleAPI) requireAccess(action rbac.Action) bool {
    return itr.RequireProjectAccess(itr.projectID, action, rbac.ResourceImmutableTag)
}

// List lists all immutable tag rules of the current project
func (itr *ImmutableTagRuleAPI) List() {
    rules, err := itr.ctr.ListImmutableRules(itr.projectID)
    if err != nil {
        itr.SendInternalServerError(err)
        return
    }
    itr.WriteJSONData(rules)
}

// Post creates an immutable tag rule
func (itr *ImmutableTagRuleAPI) Post() {
    ir := &model.Metadata{}
    isValid, err := itr.DecodeJSONReqAndValidate(ir)
    if !isValid {
        itr.SendBadRequestError(err)
        return
    }
    ir.ProjectID = itr.projectID
    id, err := itr.ctr.CreateImmutableRule(ir)
    if err != nil {
        itr.SendInternalServerError(err)
        return
    }
    itr.Redirect(http.StatusCreated, strconv.FormatInt(id, 10))

}

// Delete deletes an immutable tag rule
func (itr *ImmutableTagRuleAPI) Delete() {
    if itr.ID <= 0 {
        itr.SendBadRequestError(fmt.Errorf("invalid immutable rule id %d", itr.ID))
        return
    }
    err := itr.ctr.DeleteImmutableRule(itr.ID)
    if err != nil {
        itr.SendInternalServerError(err)
        return
    }
}

// Put updates an immutable tag rule
func (itr *ImmutableTagRuleAPI) Put() {
    ir := &model.Metadata{}
    if err := itr.DecodeJSONReq(ir); err != nil {
        itr.SendBadRequestError(err)
        return
    }
    ir.ID = itr.ID
    ir.ProjectID = itr.projectID

    if itr.ID <= 0 {
        itr.SendBadRequestError(fmt.Errorf("invalid immutable rule id %d", itr.ID))
        return
    }

    if err := itr.ctr.UpdateImmutableRule(itr.projectID, ir); err != nil {
        itr.SendInternalServerError(err)
        return
    }
}
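A sketch of exercising the new endpoint from a client. The host and credentials are hypothetical; the payload shape follows the commented-out JSON body in immutabletagrule_test.go below, and the handler above answers 201 Created on success:

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    body := []byte(`{
        "disabled": false,
        "tag_selectors": [{"kind": "doublestar", "decoration": "matches", "pattern": "release-[\\d\\.]+"}],
        "scope_selectors": {"repository": [{"kind": "doublestar", "decoration": "matches", "pattern": ".+"}]}
    }`)

    req, err := http.NewRequest(http.MethodPost,
        "https://harbor.example.com/api/projects/1/immutabletagrules", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/json")
    req.SetBasicAuth("admin", "password") // assumption: basic auth is accepted here

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status) // expect "201 Created"
}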
335
src/core/api/immutabletagrule_test.go
@ -0,0 +1,335 @@
package api

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/goharbor/harbor/src/pkg/immutabletag"
    "github.com/goharbor/harbor/src/pkg/immutabletag/model"
)

func TestImmutableTagRuleAPI_List(t *testing.T) {

    metadata := &model.Metadata{
        ProjectID: 1,
        Disabled:  false,
        TagSelectors: []*model.Selector{
            {
                Kind:       "doublestar",
                Decoration: "matches",
                Pattern:    "release-[\\d\\.]+",
            },
        },
        ScopeSelectors: map[string][]*model.Selector{
            "repository": {
                {
                    Kind:       "doublestar",
                    Decoration: "matches",
                    Pattern:    ".+",
                },
            },
        },
    }
    mgr := immutabletag.NewDefaultRuleManager()
    id, err := mgr.CreateImmutableRule(metadata)
    if err != nil {
        t.Error(err)
    }
    defer mgr.DeleteImmutableRule(id)
    cases := []*codeCheckingCase{
        // 401
        {
            request: &testingRequest{
                method: http.MethodGet,
                url:    "/api/projects/1/immutabletagrules",
            },
            code: http.StatusUnauthorized,
        },
        // 200
        {
            request: &testingRequest{
                method:     http.MethodGet,
                url:        "/api/projects/1/immutabletagrules",
                credential: admin,
            },
            postFunc: func(responseRecorder *httptest.ResponseRecorder) error {
                var rules []model.Metadata
                err := json.Unmarshal([]byte(responseRecorder.Body.String()), &rules)
                if err != nil {
                    return err
                }
                if len(rules) <= 0 {
                    return fmt.Errorf("no rules found")
                }
                if rules[0].TagSelectors[0].Kind != "doublestar" {
                    return fmt.Errorf("rule is not expected. actual: %v", responseRecorder.Body.String())
                }
                return nil
            },
            code: http.StatusOK,
        },
        // 200
        {
            request: &testingRequest{
                method:     http.MethodGet,
                url:        "/api/projects/1/immutabletagrules",
                credential: projAdmin,
            },
            code: http.StatusOK,
        },
        // 403
        {
            request: &testingRequest{
                method:     http.MethodGet,
                url:        "/api/projects/1/immutabletagrules",
                credential: projGuest,
            },
            code: http.StatusForbidden,
        },
    }
    runCodeCheckingCases(t, cases...)

}

func TestImmutableTagRuleAPI_Post(t *testing.T) {

    // body := `{
    //   "projectID":1,
    //   "priority":0,
    //   "template": "immutable_template",
    //   "action": "immutable",
    //   "disabled":false,
    //   "action":"immutable",
    //   "template":"immutable_template",
    //   "tag_selectors":[{"kind":"doublestar","decoration":"matches","pattern":"**"}],
    //   "scope_selectors":{"repository":[{"kind":"doublestar","decoration":"repoMatches","pattern":"**"}]}
    // }`

    metadata := &model.Metadata{
        ProjectID: 1,
        Disabled:  false,
        Priority:  0,
        Template:  "immutable_template",
        Action:    "immutable",
        TagSelectors: []*model.Selector{
            {
                Kind:       "doublestar",
                Decoration: "matches",
                Pattern:    "release-[\\d\\.]+",
            },
        },
        ScopeSelectors: map[string][]*model.Selector{
            "repository": {
                {
                    Kind:       "doublestar",
                    Decoration: "matches",
                    Pattern:    ".+",
                },
            },
        },
    }

    cases := []*codeCheckingCase{
        // 401
        {
            request: &testingRequest{
                method:   http.MethodPost,
                url:      "/api/projects/1/immutabletagrules",
                bodyJSON: metadata,
            },
            code: http.StatusUnauthorized,
        },
        // 201
        {
            request: &testingRequest{
                method:     http.MethodPost,
                url:        "/api/projects/1/immutabletagrules",
                credential: admin,
                bodyJSON:   metadata,
            },
            code: http.StatusCreated,
        },
        // 201
        {
            request: &testingRequest{
                method:     http.MethodPost,
                url:        "/api/projects/1/immutabletagrules",
                credential: projAdmin,
                bodyJSON:   metadata,
            },
            code: http.StatusCreated,
        },
        // 403
        {
            request: &testingRequest{
                method:     http.MethodPost,
                url:        "/api/projects/1/immutabletagrules",
                credential: projGuest,
                bodyJSON:   metadata,
            },
            code: http.StatusForbidden,
        },
    }
    runCodeCheckingCases(t, cases...)

}

func TestImmutableTagRuleAPI_Put(t *testing.T) {

    metadata := &model.Metadata{
        ProjectID: 1,
        Disabled:  false,
        TagSelectors: []*model.Selector{
            {
                Kind:       "doublestar",
                Decoration: "matches",
                Pattern:    "release-[\\d\\.]+",
            },
        },
        ScopeSelectors: map[string][]*model.Selector{
            "repository": {
                {
                    Kind:       "doublestar",
                    Decoration: "matches",
                    Pattern:    ".+",
                },
            },
        },
    }

    metadata2 := &model.Metadata{
        ProjectID: 1,
        Disabled:  false,
        TagSelectors: []*model.Selector{
            {
                Kind:       "doublestar",
                Decoration: "matches",
                Pattern:    "latest",
            },
        },
        ScopeSelectors: map[string][]*model.Selector{
            "repository": {
                {
                    Kind:       "doublestar",
                    Decoration: "matches",
                    Pattern:    ".+",
                },
            },
        },
    }
    mgr := immutabletag.NewDefaultRuleManager()
    id, err := mgr.CreateImmutableRule(metadata)
    if err != nil {
        t.Error(err)
    }
    defer mgr.DeleteImmutableRule(id)

    url := fmt.Sprintf("/api/projects/1/immutabletagrules/%d", id)
    cases := []*codeCheckingCase{
        // 401
        {
            request: &testingRequest{
                method:   http.MethodPut,
                url:      url,
                bodyJSON: metadata2,
            },
            code: http.StatusUnauthorized,
        },
        // 200
        {
            request: &testingRequest{
                method:     http.MethodPut,
                url:        url,
                credential: admin,
                bodyJSON:   metadata2,
            },
            code: http.StatusOK,
        },
        // 200
        {
            request: &testingRequest{
                method:     http.MethodPut,
                url:        url,
                credential: projAdmin,
                bodyJSON:   metadata2,
            },
            code: http.StatusOK,
        },
        // 403
        {
            request: &testingRequest{
                method:     http.MethodPut,
                url:        url,
                credential: projGuest,
                bodyJSON:   metadata2,
            },
            code: http.StatusForbidden,
        },
    }
    runCodeCheckingCases(t, cases...)
}

func TestImmutableTagRuleAPI_Delete(t *testing.T) {
    metadata := &model.Metadata{
        ProjectID: 1,
        Disabled:  false,
        TagSelectors: []*model.Selector{
            {
                Kind:       "doublestar",
                Decoration: "matches",
                Pattern:    "latest",
            },
        },
        ScopeSelectors: map[string][]*model.Selector{
            "repository": {
                {
                    Kind:       "doublestar",
                    Decoration: "matches",
                    Pattern:    ".+",
                },
            },
        },
    }

    mgr := immutabletag.NewDefaultRuleManager()
    id, err := mgr.CreateImmutableRule(metadata)
    if err != nil {
        t.Error(err)
    }
    defer mgr.DeleteImmutableRule(id)

    url := fmt.Sprintf("/api/projects/1/immutabletagrules/%d", id)

    cases := []*codeCheckingCase{
        // 401
        {
            request: &testingRequest{
                method: http.MethodDelete,
                url:    url,
            },
            code: http.StatusUnauthorized,
        },
        // 403
        {
            request: &testingRequest{
                method:     http.MethodDelete,
                url:        url,
                credential: projGuest,
            },
            code: http.StatusForbidden,
        },
        // 200
        {
            request: &testingRequest{
                method:     http.MethodDelete,
                url:        url,
                credential: projAdmin,
            },
            code: http.StatusOK,
        },
    }
    runCodeCheckingCases(t, cases...)
}
@ -7,6 +7,8 @@ import (
    "strconv"
    "time"

    "github.com/goharbor/harbor/src/common/utils/log"

    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/rbac"
    "github.com/goharbor/harbor/src/common/utils"
@ -273,7 +275,8 @@ func (w *NotificationPolicyAPI) Test() {
    }

    if err := notification.PolicyMgr.Test(policy); err != nil {
        w.SendBadRequestError(fmt.Errorf("notification policy %s test failed: %v", policy.Name, err))
        log.Errorf("notification policy %s test failed: %v", policy.Name, err)
        w.SendBadRequestError(fmt.Errorf("notification policy %s test failed", policy.Name))
        return
    }
}
@ -16,6 +16,7 @@ package api

import (
    "errors"

    "github.com/goharbor/harbor/src/common/utils/log"
    "github.com/goharbor/harbor/src/common/utils/oidc"
)
@ -50,7 +51,7 @@ func (oa *OIDCAPI) Ping() {
    }
    if err := oidc.TestEndpoint(c); err != nil {
        log.Errorf("Failed to verify connection: %+v, err: %v", c, err)
        oa.SendBadRequestError(err)
        oa.SendBadRequestError(errors.New("failed to verify connection"))
        return
    }
}
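Both this hunk and the notification-policy change above follow the same pattern: log the detailed error server-side, return only a generic message to the client, so endpoint internals are not leaked through API responses. A self-contained sketch of that split, with illustrative names:

package main

import (
    "errors"
    "fmt"
    "log"
)

// handleProbeError logs the detailed error and returns a generic one
// suitable for the API response.
func handleProbeError(op string, err error) error {
    log.Printf("%s failed: %v", op, err) // full detail stays in server logs
    return fmt.Errorf("%s failed", op)   // client sees only a generic message
}

func main() {
    err := handleProbeError("verify connection",
        errors.New("x509: certificate signed by unknown authority"))
    fmt.Println(err) // "verify connection failed"
}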
112
src/core/api/pro_scanner.go
@ -0,0 +1,112 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
    "github.com/goharbor/harbor/src/common/rbac"
    "github.com/goharbor/harbor/src/pkg/scan/api/scanner"
    "github.com/pkg/errors"
)

// ProjectScannerAPI provides rest API for managing the project level scanner(s).
type ProjectScannerAPI struct {
    // The base controller to provide common utilities
    BaseController
    // Scanner controller for operating scanner registrations.
    c scanner.Controller
    // ID of the project
    pid int64
}

// Prepare sth. for the subsequent actions
func (sa *ProjectScannerAPI) Prepare() {
    // Call super prepare method
    sa.BaseController.Prepare()

    // Check access permissions
    if !sa.RequireAuthenticated() {
        return
    }

    // Get ID of the project
    pid, err := sa.GetInt64FromPath(":pid")
    if err != nil {
        sa.SendBadRequestError(errors.Wrap(err, "project scanner API"))
        return
    }

    // Check if the project exists
    exists, err := sa.ProjectMgr.Exists(pid)
    if err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "project scanner API"))
        return
    }

    if !exists {
        sa.SendNotFoundError(errors.Errorf("project with id %d", pid))
        return
    }

    sa.pid = pid

    sa.c = scanner.DefaultController
}

// GetProjectScanner gets the project level scanner
func (sa *ProjectScannerAPI) GetProjectScanner() {
    // Check access permissions
    if !sa.RequireProjectAccess(sa.pid, rbac.ActionRead, rbac.ResourceConfiguration) {
        return
    }

    r, err := sa.c.GetRegistrationByProject(sa.pid)
    if err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scanner API: get project scanners"))
        return
    }

    if r != nil {
        sa.Data["json"] = r
    } else {
        sa.Data["json"] = make(map[string]interface{})
    }

    sa.ServeJSON()
}

// SetProjectScanner sets the project level scanner
func (sa *ProjectScannerAPI) SetProjectScanner() {
    // Check access permissions
    if !sa.RequireProjectAccess(sa.pid, rbac.ActionUpdate, rbac.ResourceConfiguration) {
        return
    }

    body := make(map[string]string)
    if err := sa.DecodeJSONReq(&body); err != nil {
        sa.SendBadRequestError(errors.Wrap(err, "scanner API: set project scanners"))
        return
    }

    uuid, ok := body["uuid"]
    if !ok || len(uuid) == 0 {
        sa.SendBadRequestError(errors.New("missing scanner uuid when setting project scanner"))
        return
    }

    if err := sa.c.SetRegistrationByProject(sa.pid, uuid); err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scanner API: set project scanners"))
        return
    }
}
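Note the `r != nil` branch in GetProjectScanner: when no scanner is bound, the handler serializes an empty map rather than a nil pointer, so the client always receives a JSON object instead of `null`. A minimal demonstration of the difference with encoding/json:

package main

import (
    "encoding/json"
    "fmt"
)

type registration struct {
    UUID string `json:"uuid"`
}

func main() {
    var r *registration // nil: no scanner bound to the project

    withNil, _ := json.Marshal(r)
    fmt.Println(string(withNil)) // null

    empty, _ := json.Marshal(make(map[string]interface{}))
    fmt.Println(string(empty)) // {}
}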
95
src/core/api/pro_scanner_test.go
@ -0,0 +1,95 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
    "fmt"
    "net/http"
    "testing"

    "github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    sc "github.com/goharbor/harbor/src/pkg/scan/api/scanner"
    "github.com/stretchr/testify/suite"
)

// ProScannerAPITestSuite is test suite for testing the project scanner API
type ProScannerAPITestSuite struct {
    suite.Suite

    originC sc.Controller
    mockC   *MockScannerAPIController
}

// TestProScannerAPI is the entry of ProScannerAPITestSuite
func TestProScannerAPI(t *testing.T) {
    suite.Run(t, new(ProScannerAPITestSuite))
}

// SetupTest prepares testing env
func (suite *ProScannerAPITestSuite) SetupTest() {
    suite.originC = sc.DefaultController
    m := &MockScannerAPIController{}
    sc.DefaultController = m

    suite.mockC = m
}

// TearDownTest clears test case env
func (suite *ProScannerAPITestSuite) TearDownTest() {
    // Restore
    sc.DefaultController = suite.originC
}

// TestScannerAPIProjectScanner tests the API of getting/setting project level scanner
func (suite *ProScannerAPITestSuite) TestScannerAPIProjectScanner() {
    suite.mockC.On("SetRegistrationByProject", int64(1), "uuid").Return(nil)

    // Set
    body := make(map[string]interface{}, 1)
    body["uuid"] = "uuid"
    runCodeCheckingCases(suite.T(), &codeCheckingCase{
        request: &testingRequest{
            url:        fmt.Sprintf("/api/projects/%d/scanner", 1),
            method:     http.MethodPut,
            credential: projAdmin,
            bodyJSON:   body,
        },
        code: http.StatusOK,
    })

    r := &scanner.Registration{
        ID:          1004,
        UUID:        "uuid",
        Name:        "TestScannerAPIProjectScanner",
        Description: "JUST FOR TEST",
        URL:         "https://a.b.c",
    }
    suite.mockC.On("GetRegistrationByProject", int64(1)).Return(r, nil)

    // Get
    rr := &scanner.Registration{}
    err := handleAndParse(&testingRequest{
        url:        fmt.Sprintf("/api/projects/%d/scanner", 1),
        method:     http.MethodGet,
        credential: projAdmin,
    }, rr)
    require.NoError(suite.T(), err)

    assert.Equal(suite.T(), r.Name, rr.Name)
    assert.Equal(suite.T(), r.UUID, rr.UUID)
}
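The suite above swaps the package-level sc.DefaultController for a mock in SetupTest and restores it in TearDownTest, a common seam for testing code that reaches for a package-level dependency. The pattern in isolation, with an illustrative Greeter standing in for the controller:

package example

import (
    "testing"

    "github.com/stretchr/testify/suite"
)

// Greeter stands in for a swappable package-level dependency.
type Greeter interface{ Greet() string }

type realGreeter struct{}

func (realGreeter) Greet() string { return "hello" }

type fakeGreeter struct{}

func (fakeGreeter) Greet() string { return "fake" }

// Default is the package-level instance production code uses.
var Default Greeter = realGreeter{}

type SwapSuite struct {
    suite.Suite
    orig Greeter
}

func (s *SwapSuite) SetupTest()    { s.orig = Default; Default = fakeGreeter{} }
func (s *SwapSuite) TearDownTest() { Default = s.orig } // restore for test isolation

func (s *SwapSuite) TestGreet() { s.Equal("fake", Default.Greet()) }

func TestSwapSuite(t *testing.T) { suite.Run(t, new(SwapSuite)) }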
@ -234,6 +234,12 @@ func (p *ProjectAPI) Post() {

// Head ...
func (p *ProjectAPI) Head() {

    if !p.SecurityCtx.IsAuthenticated() {
        p.SendUnAuthorizedError(errors.New("Unauthorized"))
        return
    }

    name := p.GetString("project_name")
    if len(name) == 0 {
        p.SendBadRequestError(errors.New("project_name is needed"))
@ -329,13 +329,13 @@ func TestDeleteProject(t *testing.T) {

}
func TestProHead(t *testing.T) {
    fmt.Println("\nTest for Project HEAD API")
    t.Log("\nTest for Project HEAD API")
    assert := assert.New(t)

    apiTest := newHarborAPI()

    // ----------------------------case 1 : Response Code=200----------------------------//
    fmt.Println("case 1: response code:200")
    t.Log("case 1: response code:200")
    httpStatusCode, err := apiTest.ProjectsHead(*admin, "library")
    if err != nil {
        t.Error("Error while search project by proName", err.Error())
@ -345,7 +345,7 @@ func TestProHead(t *testing.T) {
    }

    // ----------------------------case 2 : Response Code=404:Project name does not exist.----------------------------//
    fmt.Println("case 2: response code:404,Project name does not exist.")
    t.Log("case 2: response code:404,Project name does not exist.")
    httpStatusCode, err = apiTest.ProjectsHead(*admin, "libra")
    if err != nil {
        t.Error("Error while search project by proName", err.Error())
@ -354,6 +354,24 @@ func TestProHead(t *testing.T) {
        assert.Equal(int(404), httpStatusCode, "httpStatusCode should be 404")
    }

    t.Log("case 3: response code:401. Project exist with unauthenticated user")
    httpStatusCode, err = apiTest.ProjectsHead(*unknownUsr, "library")
    if err != nil {
        t.Error("Error while search project by proName", err.Error())
        t.Log(err)
    } else {
        assert.Equal(int(401), httpStatusCode, "httpStatusCode should be 401")
    }

    t.Log("case 4: response code:401. Project name does not exist with unauthenticated user")
    httpStatusCode, err = apiTest.ProjectsHead(*unknownUsr, "libra")
    if err != nil {
        t.Error("Error while search project by proName", err.Error())
        t.Log(err)
    } else {
        assert.Equal(int(401), httpStatusCode, "httpStatusCode should be 401")
    }

    fmt.Printf("\n")
}
@ -251,8 +251,8 @@ func AddProjectMember(projectID int64, request models.MemberReq) (int, error) {
        return 0, err
    }
    member.EntityID = groupID
} else if len(request.MemberGroup.GroupName) > 0 && request.MemberGroup.GroupType == common.HTTPGroupType {
    ugs, err := group.QueryUserGroup(models.UserGroup{GroupName: request.MemberGroup.GroupName, GroupType: common.HTTPGroupType})
} else if len(request.MemberGroup.GroupName) > 0 && request.MemberGroup.GroupType == common.HTTPGroupType || request.MemberGroup.GroupType == common.OIDCGroupType {
    ugs, err := group.QueryUserGroup(models.UserGroup{GroupName: request.MemberGroup.GroupName, GroupType: request.MemberGroup.GroupType})
    if err != nil {
        return 0, err
    }
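Worth noting about the added condition: Go parses `a && b || c` as `(a && b) || c`, so the `len(GroupName) > 0` guard does not apply to the OIDC branch; if the guard were intended for both group types, the type checks would need parentheses. A quick demonstration of the precedence:

package main

import "fmt"

func main() {
    nameSet, isHTTP, isOIDC := false, false, true

    // Parsed as (nameSet && isHTTP) || isOIDC: the name guard is bypassed for OIDC.
    fmt.Println(nameSet && isHTTP || isOIDC) // true

    // With explicit grouping, the guard applies to both group types.
    fmt.Println(nameSet && (isHTTP || isOIDC)) // false
}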
@ -196,7 +196,7 @@ func TestProjectMemberAPI_Post(t *testing.T) {
                },
            },
        },
        code: http.StatusBadRequest,
        code: http.StatusInternalServerError,
    },
    {
        request: &testingRequest{
@ -241,7 +241,7 @@ func TestProjectMemberAPI_Post(t *testing.T) {
                },
            },
        },
        code: http.StatusBadRequest,
        code: http.StatusInternalServerError,
    },
}
runCodeCheckingCases(t, cases...)
@ -284,6 +284,9 @@ func persistPB(projects []quota.ProjectInfo) error {
        }
        _, err = dao.AddBlobsToProject(pro.ProjectID, blobsOfPro...)
        if err != nil {
            if err == dao.ErrDupRows {
                continue
            }
            log.Error(err)
            return err
        }
@ -25,6 +25,11 @@ import (
    "strings"
    "time"

    "github.com/goharbor/harbor/src/jobservice/logger"

    "github.com/goharbor/harbor/src/pkg/scan/api/scan"
    v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"

    "github.com/docker/distribution/manifest/schema1"
    "github.com/docker/distribution/manifest/schema2"
    "github.com/goharbor/harbor/src/common"
@ -40,7 +45,6 @@ import (
    "github.com/goharbor/harbor/src/core/config"
    notifierEvt "github.com/goharbor/harbor/src/core/notifier/event"
    coreutils "github.com/goharbor/harbor/src/core/utils"
    "github.com/goharbor/harbor/src/pkg/scan"
    "github.com/goharbor/harbor/src/replication"
    "github.com/goharbor/harbor/src/replication/event"
    "github.com/goharbor/harbor/src/replication/model"
@ -397,6 +401,13 @@ func (ra *RepositoryAPI) GetTag() {
        return
    }

    project, err := ra.ProjectMgr.Get(projectName)
    if err != nil {
        ra.ParseAndHandleError(fmt.Sprintf("failed to get the project %s",
            projectName), err)
        return
    }

    client, err := coreutils.NewRepositoryClientForUI(ra.SecurityCtx.GetUsername(), repository)
    if err != nil {
        ra.SendInternalServerError(fmt.Errorf("failed to initialize the client for %s: %v",
@ -414,7 +425,7 @@ func (ra *RepositoryAPI) GetTag() {
        return
    }

    result := assembleTagsInParallel(client, repository, []string{tag},
    result := assembleTagsInParallel(client, project.ProjectID, repository, []string{tag},
        ra.SecurityCtx.GetUsername())
    ra.Data["json"] = result[0]
    ra.ServeJSON()
@ -523,14 +534,14 @@ func (ra *RepositoryAPI) GetTags() {
    }

    projectName, _ := utils.ParseRepository(repoName)
    exist, err := ra.ProjectMgr.Exists(projectName)
    project, err := ra.ProjectMgr.Get(projectName)
    if err != nil {
        ra.ParseAndHandleError(fmt.Sprintf("failed to check the existence of project %s",
        ra.ParseAndHandleError(fmt.Sprintf("failed to get the project %s",
            projectName), err)
        return
    }

    if !exist {
    if project == nil {
        ra.SendNotFoundError(fmt.Errorf("project %s not found", projectName))
        return
    }
@ -587,8 +598,13 @@ func (ra *RepositoryAPI) GetTags() {
        return
    }

    ra.Data["json"] = assembleTagsInParallel(client, repoName, tags,
        ra.SecurityCtx.GetUsername())
    ra.Data["json"] = assembleTagsInParallel(
        client,
        project.ProjectID,
        repoName,
        tags,
        ra.SecurityCtx.GetUsername(),
    )
    ra.ServeJSON()
}

@ -607,7 +623,7 @@ func simpleTags(tags []string) []*models.TagResp {

// get config, signature and scan overview and assemble them into one
// struct for each tag in tags
func assembleTagsInParallel(client *registry.Repository, repository string,
func assembleTagsInParallel(client *registry.Repository, projectID int64, repository string,
    tags []string, username string) []*models.TagResp {
    var err error
    signatures := map[string][]notarymodel.Target{}
@ -621,8 +637,15 @@ func assembleTagsInParallel(client *registry.Repository, repository string,

    c := make(chan *models.TagResp)
    for _, tag := range tags {
        go assembleTag(c, client, repository, tag, config.WithClair(),
            config.WithNotary(), signatures)
        go assembleTag(
            c,
            client,
            projectID,
            repository,
            tag,
            config.WithNotary(),
            signatures,
        )
    }
    result := []*models.TagResp{}
    var item *models.TagResp
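assembleTagsInParallel fans one goroutine out per tag and collects the results over an unbuffered channel, one receive per goroutine. The general fan-out/fan-in shape, reduced to a self-contained sketch:

package main

import "fmt"

type tagResp struct{ Name string }

func assemble(c chan<- *tagResp, tag string) {
    // ...fetch config/signature/scan summary for the tag here...
    c <- &tagResp{Name: tag}
}

func main() {
    tags := []string{"v1", "v2", "latest"}
    c := make(chan *tagResp)

    for _, tag := range tags {
        go assemble(c, tag) // fan out: one goroutine per tag
    }

    result := make([]*tagResp, 0, len(tags))
    for range tags { // fan in: exactly one receive per goroutine
        result = append(result, <-c)
    }
    fmt.Println(len(result)) // 3
}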
@ -636,8 +659,8 @@ func assembleTagsInParallel(client *registry.Repository, repository string,
    return result
}

func assembleTag(c chan *models.TagResp, client *registry.Repository,
    repository, tag string, clairEnabled, notaryEnabled bool,
func assembleTag(c chan *models.TagResp, client *registry.Repository, projectID int64,
    repository, tag string, notaryEnabled bool,
    signatures map[string][]notarymodel.Target) {
    item := &models.TagResp{}
    // labels
@ -659,8 +682,9 @@ func assembleTag(c chan *models.TagResp, client *registry.Repository,
    }

    // scan overview
    if clairEnabled {
        item.ScanOverview = getScanOverview(item.Digest, item.Name)
    so := getSummary(projectID, repository, item.Digest)
    if len(so) > 0 {
        item.ScanOverview = so
    }

    // signature, compare both digest and tag
@ -968,73 +992,6 @@ func (ra *RepositoryAPI) GetSignatures() {
    ra.ServeJSON()
}

// ScanImage handles request POST /api/repository/$repository/tags/$tag/scan to trigger image scan manually.
func (ra *RepositoryAPI) ScanImage() {
    if !config.WithClair() {
        log.Warningf("Harbor is not deployed with Clair, scan is disabled.")
        ra.SendInternalServerError(errors.New("harbor is not deployed with Clair, scan is disabled"))
        return
    }
    repoName := ra.GetString(":splat")
    tag := ra.GetString(":tag")
    projectName, _ := utils.ParseRepository(repoName)
    exist, err := ra.ProjectMgr.Exists(projectName)
    if err != nil {
        ra.ParseAndHandleError(fmt.Sprintf("failed to check the existence of project %s",
            projectName), err)
        return
    }
    if !exist {
        ra.SendNotFoundError(fmt.Errorf("project %s not found", projectName))
        return
    }
    if !ra.SecurityCtx.IsAuthenticated() {
        ra.SendUnAuthorizedError(errors.New("Unauthorized"))
        return
    }

    if !ra.RequireProjectAccess(projectName, rbac.ActionCreate, rbac.ResourceRepositoryTagScanJob) {
        return
    }
    err = coreutils.TriggerImageScan(repoName, tag)
    if err != nil {
        log.Errorf("Error while calling job service to trigger image scan: %v", err)
        ra.SendInternalServerError(errors.New("Failed to scan image, please check log for details"))
        return
    }
}

// VulnerabilityDetails fetch vulnerability info from clair, transform to Harbor's format and return to client.
func (ra *RepositoryAPI) VulnerabilityDetails() {
    if !config.WithClair() {
        log.Warningf("Harbor is not deployed with Clair, it's not impossible to get vulnerability details.")
        ra.SendInternalServerError(errors.New("harbor is not deployed with Clair, it's not impossible to get vulnerability details"))
        return
    }
    repository := ra.GetString(":splat")
    tag := ra.GetString(":tag")
    exist, digest, err := ra.checkExistence(repository, tag)
    if err != nil {
        ra.SendInternalServerError(fmt.Errorf("failed to check the existence of resource, error: %v", err))
        return
    }
    if !exist {
        ra.SendNotFoundError(fmt.Errorf("resource: %s:%s not found", repository, tag))
        return
    }

    projectName, _ := utils.ParseRepository(repository)
    if !ra.RequireProjectAccess(projectName, rbac.ActionList, rbac.ResourceRepositoryTagVulnerability) {
        return
    }
    res, err := scan.VulnListByDigest(digest)
    if err != nil {
        log.Errorf("Failed to get vulnerability list for image: %s:%s", repository, tag)
    }
    ra.Data["json"] = res
    ra.ServeJSON()
}

func getSignatures(username, repository string) (map[string][]notarymodel.Target, error) {
    targets, err := notary.GetInternalTargets(config.InternalNotaryEndpoint(),
        username, repository)
@ -1079,33 +1036,19 @@ func (ra *RepositoryAPI) checkExistence(repository, tag string) (bool, string, e
    return true, digest, nil
}

// will return nil when it failed to get data. The parm "tag" is for logging only.
func getScanOverview(digest string, tag string) *models.ImgScanOverview {
    if len(digest) == 0 {
        log.Debug("digest is nil")
        return nil
func getSummary(pid int64, repository string, digest string) map[string]interface{} {
    // At present, only get harbor native report as default behavior.
    artifact := &v1.Artifact{
        NamespaceID: pid,
        Repository:  repository,
        Digest:      digest,
        MimeType:    v1.MimeTypeDockerArtifact,
    }
    data, err := dao.GetImgScanOverview(digest)

    sum, err := scan.DefaultController.GetSummary(artifact, []string{v1.MimeTypeNativeReport})
    if err != nil {
        log.Errorf("Failed to get scan result for tag:%s, digest: %s, error: %v", tag, digest, err)
        logger.Errorf("Failed to get scan report summary with error: %s", err)
    }
    if data == nil {
        return nil
    }
    job, err := dao.GetScanJob(data.JobID)
    if err != nil {
        log.Errorf("Failed to get scan job for id:%d, error: %v", data.JobID, err)
        return nil
    } else if job == nil { // job does not exist
        log.Errorf("The scan job with id: %d does not exist, returning nil", data.JobID)
        return nil
    }
    data.Status = job.Status
    if data.Status != models.JobFinished {
        log.Debugf("Unsetting vulnerable related historical values, job status: %s", data.Status)
        data.Sev = 0
        data.CompOverview = nil
        data.DetailsKey = ""
    }
    return data

    return sum
}
@ -42,7 +42,7 @@ func TestGetRepos(t *testing.T) {
    } else {
        assert.Equal(int(200), code, "response code should be 200")
        if repos, ok := repositories.([]repoResp); ok {
            assert.Equal(int(1), len(repos), "the length of repositories should be 1")
            require.Equal(t, int(1), len(repos), "the length of repositories should be 1")
            assert.Equal(repos[0].Name, "library/hello-world", "unexpected repository name")
        } else {
            t.Error("unexpected response")
@ -15,29 +15,30 @@
package api

import (
    "errors"
    "fmt"
    "net/http"
    "strconv"
    "time"

    "github.com/goharbor/harbor/src/common"
    "github.com/goharbor/harbor/src/common/dao"
    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/rbac"
    "github.com/goharbor/harbor/src/common/token"
    "github.com/goharbor/harbor/src/core/config"
    "github.com/goharbor/harbor/src/common/rbac/project"
    "github.com/goharbor/harbor/src/pkg/q"
    "github.com/goharbor/harbor/src/pkg/robot"
    "github.com/goharbor/harbor/src/pkg/robot/model"
    "github.com/pkg/errors"
    "net/http"
    "strconv"
)

// RobotAPI ...
type RobotAPI struct {
    BaseController
    project *models.Project
    robot *models.Robot
    ctr   robot.Controller
    robot *model.Robot
}

// Prepare ...
func (r *RobotAPI) Prepare() {

    r.BaseController.Prepare()
    method := r.Ctx.Request.Method

@ -67,6 +68,7 @@ func (r *RobotAPI) Prepare() {
        return
    }
    r.project = project
    r.ctr = robot.RobotCtr

    if method == http.MethodPut || method == http.MethodDelete {
        id, err := r.GetInt64FromPath(":id")
@ -74,8 +76,7 @@ func (r *RobotAPI) Prepare() {
            r.SendBadRequestError(errors.New("invalid robot ID"))
            return
        }

        robot, err := dao.GetRobotByID(id)
        robot, err := r.ctr.GetRobotAccount(id)
        if err != nil {
            r.SendInternalServerError(fmt.Errorf("failed to get robot %d: %v", id, err))
            return
@ -100,62 +101,39 @@ func (r *RobotAPI) Post() {
        return
    }

    var robotReq models.RobotReq
    var robotReq model.RobotCreate
    isValid, err := r.DecodeJSONReqAndValidate(&robotReq)
    if !isValid {
        r.SendBadRequestError(err)
        return
    }
    robotReq.Visible = true
    robotReq.ProjectID = r.project.ProjectID

    // Token duration in minutes
    tokenDuration := time.Duration(config.RobotTokenDuration()) * time.Minute
    expiresAt := time.Now().UTC().Add(tokenDuration).Unix()
    createdName := common.RobotPrefix + robotReq.Name

    // first to add a robot account, and get its id.
    robot := models.Robot{
        Name:        createdName,
        Description: robotReq.Description,
        ProjectID:   r.project.ProjectID,
        ExpiresAt:   expiresAt,
    if err := validateRobotReq(r.project, &robotReq); err != nil {
        r.SendBadRequestError(err)
        return
    }
    id, err := dao.AddRobot(&robot)

    robot, err := r.ctr.CreateRobotAccount(&robotReq)
    if err != nil {
        if err == dao.ErrDupRows {
            r.SendConflictError(errors.New("conflict robot account"))
            return
        }
        r.SendInternalServerError(fmt.Errorf("failed to create robot account: %v", err))
        r.SendInternalServerError(errors.Wrap(err, "robot API: post"))
        return
    }

    // generate the token, and return it with response data.
    // token is not stored in the database.
    jwtToken, err := token.New(id, r.project.ProjectID, expiresAt, robotReq.Access)
    if err != nil {
        r.SendInternalServerError(fmt.Errorf("failed to valid parameters to generate token for robot account, %v", err))
        err := dao.DeleteRobot(id)
        if err != nil {
            r.SendInternalServerError(fmt.Errorf("failed to delete the robot account: %d, %v", id, err))
        }
        return
    }
    w := r.Ctx.ResponseWriter
    w.Header().Set("Content-Type", "application/json")

    rawTk, err := jwtToken.Raw()
    if err != nil {
        r.SendInternalServerError(fmt.Errorf("failed to sign token for robot account, %v", err))
        err := dao.DeleteRobot(id)
        if err != nil {
            r.SendInternalServerError(fmt.Errorf("failed to delete the robot account: %d, %v", id, err))
        }
        return
    }

    robotRep := models.RobotRep{
    robotRep := model.RobotRep{
        Name:  robot.Name,
        Token: rawTk,
        Token: robot.Token,
    }
    r.Redirect(http.StatusCreated, strconv.FormatInt(id, 10))

    r.Redirect(http.StatusCreated, strconv.FormatInt(robot.ID, 10))
    r.Data["json"] = robotRep
    r.ServeJSON()
}
@ -166,28 +144,25 @@ func (r *RobotAPI) List() {
        return
    }

    query := models.RobotQuery{
        ProjectID: r.project.ProjectID,
    keywords := make(map[string]interface{})
    keywords["ProjectID"] = r.project.ProjectID
    keywords["Visible"] = true
    query := &q.Query{
        Keywords: keywords,
    }

    count, err := dao.CountRobot(&query)
    robots, err := r.ctr.ListRobotAccount(query)
    if err != nil {
        r.SendInternalServerError(fmt.Errorf("failed to list robots on project: %d, %v", r.project.ProjectID, err))
        r.SendInternalServerError(errors.Wrap(err, "robot API: list"))
        return
    }
    query.Page, query.Size, err = r.GetPaginationParams()
    count := len(robots)
    page, size, err := r.GetPaginationParams()
    if err != nil {
        r.SendBadRequestError(err)
        return
    }

    robots, err := dao.ListRobots(&query)
    if err != nil {
        r.SendInternalServerError(fmt.Errorf("failed to get robots %v", err))
        return
    }

    r.SetPaginationHeader(count, query.Page, query.Size)
    r.SetPaginationHeader(int64(count), page, size)
    r.Data["json"] = robots
    r.ServeJSON()
}
@ -204,13 +179,17 @@ func (r *RobotAPI) Get() {
        return
    }

    robot, err := dao.GetRobotByID(id)
    robot, err := r.ctr.GetRobotAccount(id)
    if err != nil {
        r.SendInternalServerError(fmt.Errorf("failed to get robot %d: %v", id, err))
        r.SendInternalServerError(errors.Wrap(err, "robot API: get robot"))
        return
    }
    if robot == nil {
        r.SendNotFoundError(fmt.Errorf("robot %d not found", id))
        r.SendNotFoundError(fmt.Errorf("robot API: robot %d not found", id))
        return
    }
    if !robot.Visible {
        r.SendForbiddenError(fmt.Errorf("robot API: robot %d is invisible", id))
        return
    }

@ -224,7 +203,7 @@ func (r *RobotAPI) Put() {
        return
    }

    var robotReq models.RobotReq
    var robotReq model.RobotCreate
    if err := r.DecodeJSONReq(&robotReq); err != nil {
        r.SendBadRequestError(err)
        return
@ -232,8 +211,8 @@ func (r *RobotAPI) Put() {

    r.robot.Disabled = robotReq.Disabled

    if err := dao.UpdateRobot(r.robot); err != nil {
        r.SendInternalServerError(fmt.Errorf("failed to update robot %d: %v", r.robot.ID, err))
    if err := r.ctr.UpdateRobotAccount(r.robot); err != nil {
        r.SendInternalServerError(errors.Wrap(err, "robot API: update"))
        return
    }

@ -245,8 +224,30 @@ func (r *RobotAPI) Delete() {
        return
    }

    if err := dao.DeleteRobot(r.robot.ID); err != nil {
        r.SendInternalServerError(fmt.Errorf("failed to delete robot %d: %v", r.robot.ID, err))
    if err := r.ctr.DeleteRobotAccount(r.robot.ID); err != nil {
        r.SendInternalServerError(errors.Wrap(err, "robot API: delete"))
        return
    }
}

func validateRobotReq(p *models.Project, robotReq *model.RobotCreate) error {
    if len(robotReq.Access) == 0 {
        return errors.New("access required")
    }

    namespace, _ := rbac.Resource(fmt.Sprintf("/project/%d", p.ProjectID)).GetNamespace()
    policies := project.GetAllPolicies(namespace)

    mp := map[string]bool{}
    for _, policy := range policies {
        mp[policy.String()] = true
    }

    for _, policy := range robotReq.Access {
        if !mp[policy.String()] {
            return fmt.Errorf("%s action of %s resource does not exist in project %s", policy.Action, policy.Resource, p.Name)
        }
    }

    return nil
}
@ -16,10 +16,11 @@ package api

import (
    "fmt"
    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/rbac"
    "net/http"
    "testing"

    "github.com/goharbor/harbor/src/common/rbac"
    "github.com/goharbor/harbor/src/pkg/robot/model"
)

var (
@ -28,9 +29,10 @@ var (
)

func TestRobotAPIPost(t *testing.T) {
    res := rbac.Resource("/project/1")

    rbacPolicy := &rbac.Policy{
        Resource: "/project/libray/repository",
        Resource: res.Subresource(rbac.ResourceRepository),
        Action:   "pull",
    }
    policies := []*rbac.Policy{}
@ -51,7 +53,7 @@ func TestRobotAPIPost(t *testing.T) {
        request: &testingRequest{
            method:     http.MethodPost,
            url:        robotPath,
            bodyJSON:   &models.RobotReq{},
            bodyJSON:   &model.RobotCreate{},
            credential: nonSysAdmin,
        },
        code: http.StatusForbidden,
@ -61,7 +63,7 @@ func TestRobotAPIPost(t *testing.T) {
        request: &testingRequest{
            method: http.MethodPost,
            url:    robotPath,
            bodyJSON: &models.RobotReq{
            bodyJSON: &model.RobotCreate{
                Name:        "test",
                Description: "test desc",
                Access:      policies,
@ -75,7 +77,7 @@ func TestRobotAPIPost(t *testing.T) {
        request: &testingRequest{
            method: http.MethodPost,
            url:    robotPath,
            bodyJSON: &models.RobotReq{
            bodyJSON: &model.RobotCreate{
                Name:        "testIllgel#",
                Description: "test desc",
            },
@ -83,12 +85,57 @@ func TestRobotAPIPost(t *testing.T) {
        },
        code: http.StatusBadRequest,
    },
    {
        request: &testingRequest{
            method: http.MethodPost,
            url:    robotPath,
            bodyJSON: &model.RobotCreate{
                Name:        "test",
                Description: "resource not exist",
                Access: []*rbac.Policy{
                    {Resource: res.Subresource("foo"), Action: rbac.ActionCreate},
                },
            },
            credential: projAdmin4Robot,
        },
        code: http.StatusBadRequest,
    },
    {
        request: &testingRequest{
            method: http.MethodPost,
            url:    robotPath,
            bodyJSON: &model.RobotCreate{
                Name:        "test",
                Description: "action not exist",
                Access: []*rbac.Policy{
                    {Resource: res.Subresource(rbac.ResourceRepository), Action: "foo"},
                },
            },
            credential: projAdmin4Robot,
        },
        code: http.StatusBadRequest,
    },
    {
        request: &testingRequest{
            method: http.MethodPost,
            url:    robotPath,
            bodyJSON: &model.RobotCreate{
                Name:        "test",
                Description: "policy not exist",
                Access: []*rbac.Policy{
                    {Resource: res.Subresource(rbac.ResourceMember), Action: rbac.ActionPush},
                },
            },
            credential: projAdmin4Robot,
        },
        code: http.StatusBadRequest,
    },
    // 403 -- developer
    {
        request: &testingRequest{
            method: http.MethodPost,
            url:    robotPath,
            bodyJSON: &models.RobotReq{
            bodyJSON: &model.RobotCreate{
                Name:        "test2",
                Description: "test2 desc",
            },
@ -102,7 +149,7 @@ func TestRobotAPIPost(t *testing.T) {
        request: &testingRequest{
            method: http.MethodPost,
            url:    robotPath,
            bodyJSON: &models.RobotReq{
            bodyJSON: &model.RobotCreate{
                Name:        "test",
                Description: "test desc",
                Access:      policies,
@ -259,7 +306,7 @@ func TestRobotAPIPut(t *testing.T) {
        request: &testingRequest{
            method: http.MethodPut,
            url:    fmt.Sprintf("%s/%d", robotPath, 1),
            bodyJSON: &models.Robot{
            bodyJSON: &model.Robot{
                Disabled: true,
            },
            credential: projAdmin4Robot,
192
src/core/api/scan.go
@ -0,0 +1,192 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
    "net/http"
    "strconv"

    "github.com/goharbor/harbor/src/pkg/scan/report"

    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/rbac"
    "github.com/goharbor/harbor/src/common/utils"
    coreutils "github.com/goharbor/harbor/src/core/utils"
    "github.com/goharbor/harbor/src/jobservice/logger"
    "github.com/goharbor/harbor/src/pkg/scan/api/scan"
    v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
    "github.com/pkg/errors"
)

var digestFunc digestGetter = getDigest

// ScanAPI handles the scan related actions
type ScanAPI struct {
    BaseController

    // Target artifact
    artifact *v1.Artifact
    // Project reference
    pro *models.Project
}

// Prepare sth. for the subsequent actions
func (sa *ScanAPI) Prepare() {
    // Call super prepare method
    sa.BaseController.Prepare()

    // Parse parameters
    repoName := sa.GetString(":splat")
    tag := sa.GetString(":tag")
    projectName, _ := utils.ParseRepository(repoName)

    pro, err := sa.ProjectMgr.Get(projectName)
    if err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scan API: prepare"))
        return
    }
    if pro == nil {
        sa.SendNotFoundError(errors.Errorf("project %s not found", projectName))
        return
    }
    sa.pro = pro

    // Check authentication
    if !sa.RequireAuthenticated() {
        return
    }

    // Assemble artifact object
    digest, err := digestFunc(repoName, tag, sa.SecurityCtx.GetUsername())
    if err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scan API: prepare"))
        return
    }

    sa.artifact = &v1.Artifact{
        NamespaceID: pro.ProjectID,
        Repository:  repoName,
        Tag:         tag,
        Digest:      digest,
        MimeType:    v1.MimeTypeDockerArtifact,
    }

    logger.Debugf("Scan API receives artifact: %#v", sa.artifact)
}

// Scan artifact
func (sa *ScanAPI) Scan() {
    // Check access permissions
    if !sa.RequireProjectAccess(sa.pro.ProjectID, rbac.ActionCreate, rbac.ResourceScan) {
        return
    }

    if err := scan.DefaultController.Scan(sa.artifact); err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scan API: scan"))
        return
    }

    sa.Ctx.ResponseWriter.WriteHeader(http.StatusAccepted)
}

// Report returns the required reports with the given mime types.
func (sa *ScanAPI) Report() {
    // Check access permissions
    if !sa.RequireProjectAccess(sa.pro.ProjectID, rbac.ActionRead, rbac.ResourceScan) {
        return
    }

    // Extract mime types
    producesMimes := make([]string, 0)
    if hl, ok := sa.Ctx.Request.Header[v1.HTTPAcceptHeader]; ok && len(hl) > 0 {
        producesMimes = append(producesMimes, hl...)
    }

    // Get the reports
    reports, err := scan.DefaultController.GetReport(sa.artifact, producesMimes)
    if err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scan API: get report"))
        return
    }

    vulItems := make(map[string]interface{})
    for _, rp := range reports {
        // Resolve scan report data only when it is ready
        if len(rp.Report) == 0 {
            continue
        }

        vrp, err := report.ResolveData(rp.MimeType, []byte(rp.Report))
        if err != nil {
            sa.SendInternalServerError(errors.Wrap(err, "scan API: get report"))
            return
        }

        vulItems[rp.MimeType] = vrp
    }

    sa.Data["json"] = vulItems
    sa.ServeJSON()
}

// Log returns the log stream
func (sa *ScanAPI) Log() {
    // Check access permissions
    if !sa.RequireProjectAccess(sa.pro.ProjectID, rbac.ActionRead, rbac.ResourceScan) {
        return
    }

    uuid := sa.GetString(":uuid")
    bytes, err := scan.DefaultController.GetScanLog(uuid)
    if err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scan API: log"))
        return
    }

    if bytes == nil {
        // Not found
        sa.SendNotFoundError(errors.Errorf("report with uuid %s does not exist", uuid))
        return
    }

    sa.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(len(bytes)))
    sa.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain")
    _, err = sa.Ctx.ResponseWriter.Write(bytes)
    if err != nil {
        sa.SendInternalServerError(errors.Wrap(err, "scan API: log"))
    }
}

// digestGetter is a function template for getting digest.
// TODO: This can be removed if the registry access interface is ready.
type digestGetter func(repo, tag string, username string) (string, error)

func getDigest(repo, tag string, username string) (string, error) {
    client, err := coreutils.NewRepositoryClientForUI(username, repo)
    if err != nil {
        return "", err
    }

    digest, exists, err := client.ManifestExist(tag)
    if err != nil {
        return "", err
    }

    if !exists {
        return "", errors.Errorf("tag %s does not exist", tag)
    }

    return digest, nil
}
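The package-level `var digestFunc digestGetter = getDigest` is a deliberate test seam: production code calls through the variable, and a test can swap in a canned implementation and restore the original afterwards (scan_test.go below keeps an originalDigestGetter field for exactly this). The pattern in isolation, as a standalone sketch:

package main

import "fmt"

// digestGetter mirrors the function-variable seam in scan.go.
type digestGetter func(repo, tag, username string) (string, error)

func realDigest(repo, tag, username string) (string, error) {
    // would hit the registry in production
    return "", fmt.Errorf("registry unreachable in this sketch")
}

var digestFunc digestGetter = realDigest

func main() {
    // A test can swap the seam and restore it when done.
    orig := digestFunc
    digestFunc = func(repo, tag, username string) (string, error) {
        return "digest-code-001", nil // canned value, like the suite's artifact
    }
    defer func() { digestFunc = orig }()

    d, err := digestFunc("library/hello-world", "latest", "admin")
    fmt.Println(d, err) // digest-code-001 <nil>
}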
@ -1,82 +0,0 @@
|
||||
// Copyright 2018 Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/common/dao"
|
||||
"github.com/goharbor/harbor/src/common/rbac"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
"github.com/goharbor/harbor/src/core/utils"
|
||||
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ScanJobAPI handles request to /api/scanJobs/:id/log
|
||||
type ScanJobAPI struct {
|
||||
BaseController
|
||||
jobID int64
|
||||
projectName string
|
||||
jobUUID string
|
||||
}
|
||||
|
||||
// Prepare validates that whether user has read permission to the project of the repo the scan job scanned.
func (sj *ScanJobAPI) Prepare() {
	sj.BaseController.Prepare()
	if !sj.SecurityCtx.IsAuthenticated() {
		sj.SendUnAuthorizedError(errors.New("UnAuthorized"))
		return
	}
	id, err := sj.GetInt64FromPath(":id")
	if err != nil {
		sj.SendBadRequestError(errors.New("invalid ID"))
		return
	}
	sj.jobID = id

	data, err := dao.GetScanJob(id)
	if err != nil {
		log.Errorf("Failed to load job data for job: %d, error: %v", id, err)
		sj.SendInternalServerError(errors.New("Failed to get Job data"))
		return
	}

	projectName := strings.SplitN(data.Repository, "/", 2)[0]
	if !sj.RequireProjectAccess(projectName, rbac.ActionRead, rbac.ResourceRepositoryTagScanJob) {
		log.Errorf("User does not have read permission for project: %s", projectName)
		return
	}
	sj.projectName = projectName
	sj.jobUUID = data.UUID
}

// GetLog ...
func (sj *ScanJobAPI) GetLog() {
	logBytes, err := utils.GetJobServiceClient().GetJobLog(sj.jobUUID)
	if err != nil {
		sj.ParseAndHandleError(fmt.Sprintf("Failed to get job logs, uuid: %s, error: %v", sj.jobUUID, err), err)
		return
	}
	sj.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(len(logBytes)))
	sj.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain")
	_, err = sj.Ctx.ResponseWriter.Write(logBytes)
	if err != nil {
		sj.SendInternalServerError(fmt.Errorf("Failed to write job logs, uuid: %s, error: %v", sj.jobUUID, err))
	}
}
214
src/core/api/scan_test.go
Normal file
@ -0,0 +1,214 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/pkg/scan/api/scan"
	dscan "github.com/goharbor/harbor/src/pkg/scan/dao/scan"
	v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

var scanBaseURL = "/api/repositories/library/hello-world/tags/latest/scan"

// ScanAPITestSuite is the test suite for scan API.
type ScanAPITestSuite struct {
	suite.Suite

	originalC scan.Controller
	c         *MockScanAPIController

	originalDigestGetter digestGetter

	artifact *v1.Artifact
}

// TestScanAPI is the entry point of ScanAPITestSuite.
func TestScanAPI(t *testing.T) {
	suite.Run(t, new(ScanAPITestSuite))
}

// SetupSuite prepares test env for suite.
func (suite *ScanAPITestSuite) SetupSuite() {
	suite.artifact = &v1.Artifact{
		NamespaceID: (int64)(1),
		Repository:  "library/hello-world",
		Tag:         "latest",
		Digest:      "digest-code-001",
		MimeType:    v1.MimeTypeDockerArtifact,
	}
}

// SetupTest prepares test env for test cases.
func (suite *ScanAPITestSuite) SetupTest() {
	suite.originalC = scan.DefaultController
	suite.c = &MockScanAPIController{}

	scan.DefaultController = suite.c

	suite.originalDigestGetter = digestFunc
	digestFunc = func(repo, tag string, username string) (s string, e error) {
		return "digest-code-001", nil
	}
}
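
// SetupTest swaps the package seams (scan.DefaultController and digestFunc)
// for test doubles, and TearDownTest below restores the originals, so each
// case runs against a clean mock. A failure path could be stubbed the same
// way, e.g. (a sketch only; "bad-uuid" is a hypothetical value and the
// errors package would need importing):
//
//	suite.c.On("GetScanLog", "bad-uuid").Return(nil, errors.New("boom"))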

// TearDownTest ...
func (suite *ScanAPITestSuite) TearDownTest() {
	scan.DefaultController = suite.originalC
	digestFunc = suite.originalDigestGetter
}

// TestScanAPIBase ...
func (suite *ScanAPITestSuite) TestScanAPIBase() {
	suite.c.On("Scan", &v1.Artifact{}).Return(nil)
	// Including general cases
	cases := []*codeCheckingCase{
		// 401
		{
			request: &testingRequest{
				url:    scanBaseURL,
				method: http.MethodGet,
			},
			code: http.StatusUnauthorized,
		},
		// 403
		{
			request: &testingRequest{
				url:        scanBaseURL,
				method:     http.MethodPost,
				credential: projGuest,
			},
			code: http.StatusForbidden,
		},
	}

	runCodeCheckingCases(suite.T(), cases...)
}
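
// codeCheckingCase, testingRequest, runCodeCheckingCases, and handleAndParse
// are shared helpers from this package's test utilities and are not part of
// this hunk. Inferred from their usage here, their shapes are roughly the
// following (field names and types are assumptions):
//
//	type testingRequest struct {
//		url        string
//		method     string
//		header     http.Header
//		credential *usrInfo // e.g. the projGuest/projDeveloper fixtures
//	}
//
//	type codeCheckingCase struct {
//		request *testingRequest
//		code    int // expected HTTP status
//	}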

// TestScanAPIScan ...
func (suite *ScanAPITestSuite) TestScanAPIScan() {
	suite.c.On("Scan", suite.artifact).Return(nil)

	// Including general cases
	cases := []*codeCheckingCase{
		// 202
		{
			request: &testingRequest{
				url:        scanBaseURL,
				method:     http.MethodPost,
				credential: projDeveloper,
			},
			code: http.StatusAccepted,
		},
	}

	runCodeCheckingCases(suite.T(), cases...)
}

// TestScanAPIReport ...
func (suite *ScanAPITestSuite) TestScanAPIReport() {
	suite.c.On("GetReport", suite.artifact, []string{v1.MimeTypeNativeReport}).Return([]*dscan.Report{}, nil)

	vulItems := make(map[string]interface{})

	header := make(http.Header)
	header.Add("Accept", v1.MimeTypeNativeReport)
	err := handleAndParse(
		&testingRequest{
			url:        scanBaseURL,
			method:     http.MethodGet,
			credential: projDeveloper,
			header:     header,
		}, &vulItems)
	require.NoError(suite.T(), err)
}

// TestScanAPILog ...
func (suite *ScanAPITestSuite) TestScanAPILog() {
	suite.c.On("GetScanLog", "the-uuid-001").Return([]byte(`{"log": "this is my log"}`), nil)

	logs := make(map[string]string)
	err := handleAndParse(
		&testingRequest{
			url:        fmt.Sprintf("%s/%s", scanBaseURL, "the-uuid-001/log"),
			method:     http.MethodGet,
			credential: projDeveloper,
		}, &logs)
	require.NoError(suite.T(), err)
	assert.Condition(suite.T(), func() (success bool) {
		success = len(logs) > 0
		return
	})
}

// Mock things

// MockScanAPIController ...
type MockScanAPIController struct {
	mock.Mock
}

// Scan ...
func (msc *MockScanAPIController) Scan(artifact *v1.Artifact) error {
	args := msc.Called(artifact)

	return args.Error(0)
}

// GetReport ...
func (msc *MockScanAPIController) GetReport(artifact *v1.Artifact, mimeTypes []string) ([]*dscan.Report, error) {
	args := msc.Called(artifact, mimeTypes)

	if args.Get(0) == nil {
		return nil, args.Error(1)
	}

	return args.Get(0).([]*dscan.Report), args.Error(1)
}

// GetSummary ...
func (msc *MockScanAPIController) GetSummary(artifact *v1.Artifact, mimeTypes []string) (map[string]interface{}, error) {
	args := msc.Called(artifact, mimeTypes)

	if args.Get(0) == nil {
		return nil, args.Error(1)
	}

	return args.Get(0).(map[string]interface{}), args.Error(1)
}

// GetScanLog ...
func (msc *MockScanAPIController) GetScanLog(uuid string) ([]byte, error) {
	args := msc.Called(uuid)

	if args.Get(0) == nil {
		return nil, args.Error(1)
	}

	return args.Get(0).([]byte), args.Error(1)
}

// HandleJobHooks ...
func (msc *MockScanAPIController) HandleJobHooks(trackID string, change *job.StatusChange) error {
	args := msc.Called(trackID, change)

	return args.Error(0)
}
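
// The suite never calls AssertExpectations, so stubbed calls that are never
// made pass silently. testify/mock can verify them if stricter checking is
// wanted, e.g. from TearDownTest (a sketch, not part of this change):
//
//	suite.c.AssertExpectations(suite.T())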