diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..cb1bb5248 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +src/portal/node_modules/ \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 5a765fa0e..2183b06e5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,23 +1,23 @@ sudo: true language: go go: -- 1.11.2 +- 1.12.5 go_import_path: github.com/goharbor/harbor services: - docker dist: trusty matrix: include: - - go: 1.11.2 + - go: 1.12.5 env: - UTTEST=true - - go: 1.11.2 + - go: 1.12.5 env: - APITEST_DB=true - - go: 1.11.2 + - go: 1.12.5 env: - APITEST_LDAP=true - - go: 1.11.2 + - go: 1.12.5 env: - OFFLINE=true env: diff --git a/CHANGELOG.md b/CHANGELOG.md index ffc447181..2618ea0b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +# v1.8.0 (2019-05-21) +[Full list of issues fixed in v1.8.0](https://github.com/goharbor/harbor/issues?q=is%3Aissue+is%3Aclosed+label%3Atarget%2F1.8.0) +* Support for OpenID Connect - OpenID Connect (OIDC) is an authentication layer on top of OAuth 2.0, allowing Harbor to verify the identity of users based on the authentication performed by an external authorization server or identity provider. +* Robot accounts - Robot accounts can be configured to provide administrators with a token that can be granted appropriate permissions for pulling or pushing images. Harbor users can continue operating Harbor using their enterprise SSO credentials, and use robot accounts for CI/CD systems that perform Docker client commands. +* Replication advancements - Harbor new version replication allows you to replicate your Harbor repository to and from non-Harbor registries. Harbor 1.8 expands on the Harbor-to-Harbor replication feature, adding the ability to replicate resources between Harbor and Docker Hub, Docker Registry, and Huawei Registry. This is enabled through both push and pull mode replication. 
+* Health check API, showing detailed status and health of all Harbor components. +* Support for defining cron-based scheduled tasks in the Harbor UI. Administrators can now use cron strings to define the schedule of a job. Scan, garbage collection and replication jobs are all supported. +API explorer integration. End users can now explore and trigger Harbor’s API via the swagger UI nested inside Harbor’s UI. +* Introduce a new master role to project, the role's permissions are more than developer and less than project admin. +* Introduce harbor.yml as the replacement of harbor.cfg and refactor the prepare script to provide more flexibility to the installation process based on docker-compose +* Enhancement of the Job Service engine to include webhook events, additional APIs for automation, and numerous bug fixes to improve the stability of the service. +* Docker Registry upgraded to v2.7.1. + ## v1.7.5 (2019-04-02) * Bumped up Clair to v2.0.8 * Fixed issues in supporting windows images. 
#6992 #6369 diff --git a/Makefile b/Makefile index 652558396..79e5584aa 100644 --- a/Makefile +++ b/Makefile @@ -70,7 +70,6 @@ SRCPATH=./src TOOLSPATH=$(BUILDPATH)/tools CORE_PATH=$(BUILDPATH)/src/core PORTAL_PATH=$(BUILDPATH)/src/portal -GOBASEPATH=/go/src/github.com/goharbor CHECKENVCMD=checkenv.sh # parameters @@ -101,14 +100,14 @@ PREPARE_VERSION_NAME=versions REGISTRYVERSION=v2.7.1-patch-2819 NGINXVERSION=$(VERSIONTAG) NOTARYVERSION=v0.6.1 -CLAIRVERSION=v2.0.8 +CLAIRVERSION=v2.0.9 CLAIRDBVERSION=$(VERSIONTAG) MIGRATORVERSION=$(VERSIONTAG) REDISVERSION=$(VERSIONTAG) NOTARYMIGRATEVERSION=v3.5.4 # version of chartmuseum -CHARTMUSEUMVERSION=v0.8.1 +CHARTMUSEUMVERSION=v0.9.0 define VERSIONS_FOR_PREPARE VERSION_TAG: $(VERSIONTAG) @@ -136,10 +135,10 @@ GOINSTALL=$(GOCMD) install GOTEST=$(GOCMD) test GODEP=$(GOTEST) -i GOFMT=gofmt -w -GOBUILDIMAGE=golang:1.11.2 -GOBUILDPATH=$(GOBASEPATH)/harbor +GOBUILDIMAGE=golang:1.12.5 +GOBUILDPATH=/harbor GOIMAGEBUILDCMD=/usr/local/go/bin/go -GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build +GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build -mod vendor GOBUILDPATH_CORE=$(GOBUILDPATH)/src/core GOBUILDPATH_JOBSERVICE=$(GOBUILDPATH)/src/jobservice GOBUILDPATH_REGISTRYCTL=$(GOBUILDPATH)/src/registryctl @@ -243,7 +242,7 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \ $(HARBORPKG)/install.sh \ $(HARBORPKG)/harbor.yml -DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME) +DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME) ifeq ($(NOTARYFLAG), true) DOCKERSAVE_PARA+= goharbor/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) goharbor/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG) @@ -271,7 +270,6 @@ check_environment: compile_core: @echo "compiling binary for core (golang image)..." 
- @echo $(GOBASEPATH) @echo $(GOBUILDPATH) @$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_CORE) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -o $(GOBUILDMAKEPATH_CORE)/$(CORE_BINARYNAME) @echo "Done." @@ -294,7 +292,7 @@ compile_notary_migrate_patch: compile: check_environment versions_prepare compile_core compile_jobservice compile_registryctl compile_notary_migrate_patch update_prepare_version: - @echo "substitude the prepare version tag in prepare file..." + @echo "substitute the prepare version tag in prepare file..." @$(SEDCMD) -i -e 's/goharbor\/prepare:.*[[:space:]]\+/goharbor\/prepare:$(VERSIONTAG) /' $(MAKEPATH)/prepare ; prepare: update_prepare_version @@ -414,17 +412,16 @@ pushimage: start: @echo "loading harbor images..." - @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) up -d + @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) up -d @echo "Start complete. You can visit harbor now." down: - @echo "Please make sure to set -e NOTARYFLAG=true/CLAIRFLAG=true/CHARTFLAG=true if you are using Notary/CLAIR/Chartmuseum in Harbor, otherwise the Notary/CLAIR/Chartmuseum containers cannot be stop automaticlly." @while [ -z "$$CONTINUE" ]; do \ read -r -p "Type anything but Y or y to exit. [Y/N]: " CONTINUE; \ done ; \ [ $$CONTINUE = "y" ] || [ $$CONTINUE = "Y" ] || (echo "Exiting."; exit 1;) @echo "stoping harbor instance..." - @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) down -v + @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) down -v @echo "Done." swagger_client: diff --git a/README.md b/README.md index 14cadb59e..7e0264fa4 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ Please use [releases](https://github.com/vmware/harbor/releases) instead of the Harbor -Harbor is an an open source trusted cloud native registry project that stores, signs, and scans content. Harbor extends the open source Docker Distribution by adding the functionalities usually required by users such as security, identity and management. 
Having a registry closer to the build and run environment can improve the image transfer efficiency. Harbor supports replication of images between registries, and also offers advanced security features such as user management, access control and activity auditing. +Harbor is an open source trusted cloud native registry project that stores, signs, and scans content. Harbor extends the open source Docker Distribution by adding the functionalities usually required by users such as security, identity and management. Having a registry closer to the build and run environment can improve the image transfer efficiency. Harbor supports replication of images between registries, and also offers advanced security features such as user management, access control and activity auditing. Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF). If you are an organization that wants to help shape the evolution of cloud native technologies, consider joining the CNCF. For details about who's involved and how Harbor plays a role, read the CNCF [announcement](https://www.cncf.io/blog/2018/07/31/cncf-to-host-harbor-in-the-sandbox/). @@ -33,22 +33,23 @@ Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CN ## Features * **Cloud native registry**: With support for both container images and [Helm](https://helm.sh) charts, Harbor serves as registry for cloud native environments like container runtimes and orchestration platforms. -* **Role based access control**: Users and repositories are organized via 'projects' and a user can have different permission for images under a project. -* **Policy based image replication**: Images can be replicated (synchronized) between multiple registry instances based on policies with multiple filters (repository, tag and label). Harbor will auto-retry to replicate if it encounters any errors. Great for load balancing, high availability, multi-datacenter, hybrid and multi-cloud scenarios. 
+* **Role based access control**: Users and repositories are organized via 'projects' and a user can have different permission for images or Helm charts under a project. +* **Policy based replication**: Images and charts can be replicated (synchronized) between multiple registry instances based on policies with multiple filters (repository, tag and label). Harbor automatically retries a replication if it encounters any errors. Great for load balancing, high availability, multi-datacenter, hybrid and multi-cloud scenarios. * **Vulnerability Scanning**: Harbor scans images regularly and warns users of vulnerabilities. * **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor and assigning proper project roles to them. +* **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal. * **Image deletion & garbage collection**: Images can be deleted and their space can be recycled. * **Notary**: Image authenticity can be ensured. * **Graphical user portal**: User can easily browse, search repositories and manage projects. * **Auditing**: All the operations to the repositories are tracked. -* **RESTful API**: RESTful APIs for most administrative operations, easy to integrate with external systems. -* **Easy deployment**: Provide both an online and offline installer. +* **RESTful API**: RESTful APIs for most administrative operations, easy to integrate with external systems. An embedded Swagger UI is available for exploring and testing the API. +* **Easy deployment**: Provide both an online and offline installer. In addition, a Helm Chart can be used to deploy Harbor on Kubernetes. ## Install & Run **System requirements:** -**On a Linux host:** docker 17.03.0-ce+ and docker-compose 1.18.0+ . 
+**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.18.0+ . Download binaries of **[Harbor release ](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](docs/installation_guide.md)** to install Harbor. diff --git a/docs/compile_guide.md b/docs/compile_guide.md index 8886777fa..30743b1d1 100644 --- a/docs/compile_guide.md +++ b/docs/compile_guide.md @@ -44,25 +44,25 @@ You can compile the code by one of the three approaches: * Get official Golang image from docker hub: ```sh - $ docker pull golang:1.11.2 + $ docker pull golang:1.12.5 ``` * Build, install and bring up Harbor without Notary: ```sh - $ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage + $ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage ``` * Build, install and bring up Harbor with Notary: ```sh - $ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true + $ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true ``` * Build, install and bring up Harbor with Clair: ```sh - $ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage CLAIRFLAG=true + $ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage CLAIRFLAG=true ``` #### II. Compile code with your own Golang environment, then build Harbor diff --git a/docs/configure_https.md b/docs/configure_https.md index 378c10835..5ed989311 100644 --- a/docs/configure_https.md +++ b/docs/configure_https.md @@ -113,17 +113,24 @@ Notice that you may need to trust the certificate at OS level. 
Please refer to t **3) Configure Harbor** -Edit the file ```harbor.cfg```, update the hostname and the protocol, and update the attributes ```ssl_cert``` and ```ssl_cert_key```: +Edit the file `harbor.yml`, update the hostname and uncomment the https block, and update the attributes `certificate` and `private_key`: + +```yaml +#set hostname +hostname: yourdomain.com + +http: + port: 80 + +https: + # https port for harbor, default is 443 + port: 443 + # The path of cert and key files for nginx + certificate: /data/cert/yourdomain.com.crt + private_key: /data/cert/yourdomain.com.key -``` - #set hostname - hostname = yourdomain.com:port - #set ui_url_protocol - ui_url_protocol = https ...... - #The path of cert and key files for nginx, they are applied only the protocol is set to https - ssl_cert = /data/cert/yourdomain.com.crt - ssl_cert_key = /data/cert/yourdomain.com.key + ``` Generate configuration files for Harbor: @@ -163,7 +170,7 @@ If you've mapped nginx 443 port to another, you need to add the port to login, l ``` -##Troubleshooting +## Troubleshooting 1. You may get an intermediate certificate from a certificate issuer. In this case, you should merge the intermediate certificate with your own certificate to create a certificate bundle. 
You can achieve this by the below command: ``` diff --git a/docs/img/caicloudLogoWeb.png b/docs/img/caicloudLogoWeb.png deleted file mode 100644 index 2a5c9bb58..000000000 Binary files a/docs/img/caicloudLogoWeb.png and /dev/null differ diff --git a/docs/installation_guide.md b/docs/installation_guide.md index cef1a147d..687beb094 100644 --- a/docs/installation_guide.md +++ b/docs/installation_guide.md @@ -30,7 +30,7 @@ Harbor is deployed as several Docker containers, and, therefore, can be deployed |Software|Version|Description| |---|---|---| -|Docker engine|version 17.03.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)| +|Docker engine|version 17.06.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)| |Docker Compose|version 1.18.0 or higher|For installation instructions, please refer to: [docker compose doc](https://docs.docker.com/compose/install/)| |Openssl|latest is preferred|Generate certificate and keys for Harbor| diff --git a/docs/manage_role_by_ldap_group.md b/docs/manage_role_by_ldap_group.md index dc8d19d6f..2e4bbc658 100644 --- a/docs/manage_role_by_ldap_group.md +++ b/docs/manage_role_by_ldap_group.md @@ -17,18 +17,23 @@ This guide provides instructions to manage roles by LDAP/AD group. You can impor Besides **[basic LDAP configure parameters](https://github.com/vmware/harbor/blob/master/docs/installation_guide.md#optional-parameters)** , LDAP group related configure parameters should be configured, they can be configured before or after installation - 1. Configure parameters in harbor.cfg before installation + 1. 
Configure LDAP parameters via API, refer to **[Config Harbor user settings by command line](configure_user_settings.md)** +For example: +``` +curl -X PUT -u ":" -H "Content-Type: application/json" -ki https://harbor.sample.domain/api/configurations -d'{"ldap_group_basedn":"ou=groups,dc=example,dc=com"}' +``` +The following parameters are related to LDAP group configuration. * ldap_group_basedn -- The base DN from which to lookup a group in LDAP/AD, for example: ou=groups,dc=example,dc=com * ldap_group_filter -- The filter to search LDAP/AD group, for example: objectclass=groupOfNames * ldap_group_gid -- The attribute used to name an LDAP/AD group, for example: cn * ldap_group_scope -- The scope to search for LDAP/AD groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE - 2. Or Change configure parameter in web console after installation. Go to "Administration" -> "Configuration" -> "Authentication" and change following settings. - - LDAP Group Base DN -- ldap_group_basedn in harbor.cfg - - LDAP Group Filter -- ldap_group_filter in harbor.cfg - - LDAP Group GID -- ldap_group_gid in harbor.cfg - - LDAP Group Scope -- ldap_group_scope in harbor.cfg + 2. Or change configure parameter in web console after installation. Go to "Administration" -> "Configuration" -> "Authentication" and change following settings. + - LDAP Group Base DN -- ldap_group_basedn in the Harbor user settings + - LDAP Group Filter -- ldap_group_filter in the Harbor user settings + - LDAP Group GID -- ldap_group_gid in the Harbor user settings + - LDAP Group Scope -- ldap_group_scope in the Harbor user settings - LDAP Groups With Admin Privilege -- Specify an LDAP/AD group DN, all LDAPA/AD users in this group have harbor admin privileges. 
![Screenshot of LDAP group config](img/group/ldap_group_config.png) @@ -49,4 +54,4 @@ If a user is in the LDAP groups with admin privilege (ldap_group_admin_dn), the ## User privileges and group privileges -If a user has both user-level role and group-level role, only the user level role privileges will be considered. +If a user has both user-level role and group-level role, these privileges are merged together. diff --git a/docs/migration_guide.md b/docs/migration_guide.md index 0b26e15e9..bf336480b 100644 --- a/docs/migration_guide.md +++ b/docs/migration_guide.md @@ -1,6 +1,6 @@ # Harbor upgrade and migration guide -This guide only covers upgrade and mgiration to version >= v1.8.0 +This guide only covers upgrade and migration to version >= v1.8.0 When upgrading your existing Harbor instance to a newer version, you may need to migrate the data in your database and the settings in `harbor.cfg`. Since the migration may alter the database schema and the settings of `harbor.cfg`, you should **always** back up your data before any migration. @@ -34,7 +34,7 @@ you follow the steps below. ``` mv harbor /my_backup_dir/harbor ``` - Back up database (by default in diretory `/data/database`) + Back up database (by default in directory `/data/database`) ``` cp -r /data/database /my_backup_dir/ ``` diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 06c99f4e0..d4149635c 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -2,7 +2,7 @@ swagger: '2.0' info: title: Harbor API description: These APIs provide services for manipulating Harbor project. - version: 1.7.0 + version: 1.9.0 host: localhost schemes: - http @@ -311,6 +311,34 @@ paths: description: User need to log in first. '500': description: Unexpected internal errors. + '/projects/{project_id}/summary': + get: + summary: Get summary of the project. + description: Get summary of the project. 
+ parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID + tags: + - Products + responses: + '200': + description: Get summary of the project successfully. + schema: + $ref: '#/definitions/ProjectSummary' + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '404': + description: Project ID does not exist. + '403': + description: User does not have permission to get summary of the project. + '500': + description: Unexpected internal errors. '/projects/{project_id}/metadatas': get: summary: Get project metadata. @@ -516,7 +544,7 @@ paths: '403': description: User in session does not have permission to the project. '409': - description: An LDAP user group with same DN already exist. + description: A user group with same group name already exist or an LDAP user group with same DN already exist. '500': description: Unexpected internal errors. '/projects/{project_id}/members/{mid}': @@ -1235,11 +1263,16 @@ paths: type: string required: true description: Relevant repository name. - - name: label_ids + - name: label_id in: query type: string required: false - description: A list of comma separated label IDs. + description: A label ID. + - name: detail + in: query + type: boolean + required: false + description: Bool value indicating whether return detailed information of the tag, such as vulnerability scan info, if set to false, only tag name is returned. tags: - Products responses: @@ -2376,6 +2409,20 @@ paths: $ref: '#/responses/UnsupportedMediaType' '500': description: Unexpected internal errors. + /internal/syncquota: + post: + summary: Sync quota from registry/chart to DB. + description: | + This endpoint is for syncing quota usage of registry/chart with database. + tags: + - Products + responses: + '200': + description: Sync repositories successfully. + '401': + description: User need to log in first. 
+ '403': + description: User does not have permission of system admin role. /systeminfo: get: summary: Get general system info @@ -2575,7 +2622,7 @@ paths: '403': description: User in session does not have permission to the user group. '409': - description: An LDAP user group with same DN already exist. + description: A user group with same group name already exist, or an LDAP user group with same DN already exist. '500': description: Unexpected internal errors. '/usergroups/{group_id}': @@ -3031,7 +3078,9 @@ paths: description: The chart name responses: '200': - $ref: '#/definitions/ChartVersions' + description: Retrieved all versions of the specified chart + schema: + $ref: '#/definitions/ChartVersions' '401': $ref: '#/definitions/UnauthorizedChartAPIError' '403': @@ -3091,7 +3140,9 @@ paths: description: The chart version responses: '200': - $ref: '#/definitions/ChartVersionDetails' + description: Successfully retrieved the chart version + schema: + $ref: '#/definitions/ChartVersionDetails' '401': $ref: '#/definitions/UnauthorizedChartAPIError' '403': @@ -3474,6 +3525,441 @@ paths: description: The robot account is not found. '500': description: Unexpected internal errors. + '/system/oidc/ping': + post: + summary: Test the OIDC endpoint. + description: Test the OIDC endpoint, the setting of the endpoint is provided in the request. This API can only + be called by system admin. + tags: + - Products + - System + parameters: + - name: endpoint + in: body + description: Request body for OIDC endpoint to be tested. + required: true + schema: + type: object + properties: + url: + type: string + description: The URL of OIDC endpoint to be tested. + verify_cert: + type: boolean + description: Whether the certificate should be verified + responses: + '200': + description: Ping succeeded. The OIDC endpoint is valid. + '400': + description: The ping failed + '401': + description: User need to log in first. 
+ '403': + description: User does not have permission to call this API + '/system/CVEWhitelist': + get: + summary: Get the system level whitelist of CVE. + description: Get the system level whitelist of CVE. This API can be called by all authenticated users. + tags: + - Products + - System + responses: + '200': + description: Successfully retrieved the CVE whitelist. + schema: + $ref: "#/definitions/CVEWhitelist" + '401': + description: User is not authenticated. + '500': + description: Unexpected internal errors. + put: + summary: Update the system level whitelist of CVE. + description: This API overwrites the system level whitelist of CVE with the list in request body. Only system Admin + has permission to call this API. + tags: + - Products + - System + parameters: + - in: body + name: whitelist + description: The whitelist with new content + schema: + $ref: "#/definitions/CVEWhitelist" + responses: + '200': + description: Successfully updated the CVE whitelist. + '401': + description: User is not authenticated. + '403': + description: User does not have permission to call this API. + '500': + description: Unexpected internal errors. + '/quotas': + get: + summary: List quotas + description: List quotas + tags: + - quota + parameters: + - name: reference + in: query + description: The reference type of quota. + required: false + type: string + - name: sort + in: query + type: string + required: false + description: | + Sort method, valid values include: + 'hard.resource_name', '-hard.resource_name', 'used.resource_name', '-used.resource_name'. + Here '-' stands for descending order, resource_name should be the real resource name of the quota. + - name: page + in: query + type: integer + format: int32 + required: false + description: 'The page nubmer, default is 1.' + - name: page_size + in: query + type: integer + format: int32 + required: false + description: 'The size of per page, default is 10, maximum is 100.' 
+ responses: + '200': + description: Successfully retrieved the quotas. + schema: + type: array + items: + $ref: '#/definitions/Quota' + headers: + X-Total-Count: + description: The total count of access logs + type: integer + Link: + description: Link refers to the previous page and next page + type: string + '401': + description: User is not authenticated. + '403': + description: User does not have permission to call this API. + '500': + description: Unexpected internal errors. + '/quotas/{id}': + get: + summary: Get the specified quota + description: Get the specified quota + tags: + - quota + parameters: + - name: id + in: path + type: integer + required: true + description: Quota ID + responses: + '200': + description: Successfully retrieved the quota. + schema: + $ref: '#/definitions/Quota' + '401': + description: User need to log in first. + '403': + description: User does not have permission to call this API + '404': + description: Quota does not exist. + '500': + description: Unexpected internal errors. + put: + summary: Update the specified quota + description: Update hard limits of the specified quota + tags: + - quota + parameters: + - name: id + in: path + type: integer + required: true + description: Quota ID + - name: hard + in: body + required: true + description: The new hard limits for the quota + schema: + $ref: '#/definitions/QuotaUpdateReq' + responses: + '200': + description: Updated quota hard limits successfully. + '400': + description: Illegal format of quota update request. + '401': + description: User need to log in first. + '403': + description: User does not have permission to the quota. + '404': + description: Quota ID does not exist. + '500': + description: Unexpected internal errors. + '/projects/{project_id}/webhook/policies': + get: + summary: List project webhook policies. + description: | + This endpoint returns webhook policies of a project. 
+ parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID. + tags: + - Products + responses: + '200': + description: List project webhook policies successfully. + schema: + type: array + items: + $ref: '#/definitions/WebhookPolicy' + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to list webhook policies of the project. + '500': + description: Unexpected internal errors. + post: + summary: Create project webhook policy. + description: | + This endpoint create a webhook policy if the project does not have one. + parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID + - name: policy + in: body + description: Properties "targets" and "event_types" needed. + required: true + schema: + $ref: '#/definitions/WebhookPolicy' + tags: + - Products + responses: + '201': + description: Project webhook policy create successfully. + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to create webhook policy of the project. + '500': + description: Unexpected internal errors. + '/projects/{project_id}/webhook/policies/{policy_id}': + get: + summary: Get project webhook policy + description: | + This endpoint returns specified webhook policy of a project. + parameters: + - name: project_id + in: path + description: Relevant project ID. + required: true + type: integer + format: int64 + - name: policy_id + in: path + description: The id of webhook policy. + required: true + type: integer + format: int64 + tags: + - Products + responses: + '200': + description: Get webhook policy successfully. + schema: + $ref: '#/definitions/WebhookPolicy' + '400': + description: Illegal format of provided ID value. 
+ '401': + description: User need to log in first. + '403': + description: User have no permission to get webhook policy of the project. + '404': + description: Webhook policy ID does not exist. + '500': + description: Internal server errors. + put: + summary: Update webhook policy of a project. + description: | + This endpoint is aimed to update the webhook policy of a project. + parameters: + - name: project_id + in: path + description: Relevant project ID. + required: true + type: integer + format: int64 + - name: policy_id + in: path + description: The id of webhook policy. + required: true + type: integer + format: int64 + - name: policy + in: body + description: All properties needed except "id", "project_id", "creation_time", "update_time". + required: true + schema: + $ref: '#/definitions/WebhookPolicy' + tags: + - Products + responses: + '200': + description: Update webhook policy successfully. + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to update webhook policy of the project. + '404': + description: Webhook policy ID does not exist. + '500': + description: Internal server errors. + delete: + summary: Delete webhook policy of a project + description: | + This endpoint is aimed to delete webhookpolicy of a project. + parameters: + - name: project_id + in: path + description: Relevant project ID. + required: true + type: integer + format: int64 + - name: policy_id + in: path + description: The id of webhook policy. + required: true + type: integer + format: int64 + tags: + - Products + responses: + '200': + description: Delete webhook policy successfully. + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to delete webhook policy of the project. + '404': + description: Webhook policy ID does not exist. 
+ '500': + description: Internal server errors. + '/projects/{project_id}/webhook/policies/test': + post: + summary: Test project webhook connection + description: | + This endpoint tests webhook connection of a project. + parameters: + - name: project_id + in: path + description: Relevant project ID. + required: true + type: integer + format: int64 + - name: policy + in: body + description: Only property "targets" needed. + required: true + schema: + $ref: '#/definitions/WebhookPolicy' + tags: + - Products + responses: + '200': + description: Test webhook connection successfully. + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to get webhook policy of the project. + '500': + description: Internal server errors. + '/projects/{project_id}/webhook/lasttrigger': + get: + summary: Get project webhook policy last trigger info + description: | + This endpoint returns last trigger information of project webhook policy. + parameters: + - name: project_id + in: path + description: Relevant project ID. + required: true + type: integer + format: int64 + tags: + - Products + responses: + '200': + description: Test webhook connection successfully. + schema: + type: array + items: + $ref: '#/definitions/WebhookLastTrigger' + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to get webhook policy of the project. + '500': + description: Internal server errors. + '/projects/{project_id}/webhook/jobs': + get: + summary: List project webhook jobs + description: | + This endpoint returns webhook jobs of a project. + parameters: + - name: project_id + in: path + type: integer + format: int64 + required: true + description: Relevant project ID. + - name: policy_id + in: query + type: integer + format: int64 + required: true + description: The policy ID. 
+ tags: + - Products + responses: + '200': + description: List project webhook jobs successfully. + schema: + type: array + items: + $ref: '#/definitions/WebhookJob' + '400': + description: Illegal format of provided ID value. + '401': + description: User need to log in first. + '403': + description: User have no permission to list webhook jobs of the project. + '500': + description: Unexpected internal errors. responses: OK: description: 'Success' @@ -3556,6 +4042,17 @@ definitions: metadata: description: The metadata of the project. $ref: '#/definitions/ProjectMetadata' + cve_whitelist: + description: The CVE whitelist of the project. + $ref: '#/definitions/CVEWhitelist' + count_limit: + type: integer + format: int64 + description: The count quota of the project. + storage_limit: + type: integer + format: int64 + description: The storage quota of the project. Project: type: object properties: @@ -3597,6 +4094,9 @@ definitions: metadata: description: The metadata of the project. $ref: '#/definitions/ProjectMetadata' + cve_whitelist: + description: The CVE whitelist of this project. + $ref: '#/definitions/CVEWhitelist' ProjectMetadata: type: object properties: @@ -3605,16 +4105,50 @@ definitions: description: 'The public status of the project. The valid values are "true", "false".' enable_content_trust: type: string - description: 'Whether content trust is enabled or not. If it is enabled, user cann''t pull unsigned images from this project. The valid values are "true", "false".' + description: 'Whether content trust is enabled or not. If it is enabled, user can''t pull unsigned images from this project. The valid values are "true", "false".' prevent_vul: type: string description: 'Whether prevent the vulnerable images from running. The valid values are "true", "false".' severity: type: string - description: 'If the vulnerability is high than severity defined here, the images cann''t be pulled. 
The valid values are "negligible", "low", "medium", "high", "critical".' + description: 'If the vulnerability is high than severity defined here, the images can''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".' auto_scan: type: string description: 'Whether scan images automatically when pushing. The valid values are "true", "false".' + reuse_sys_cve_whitelist: + type: string + description: 'Whether this project reuse the system level CVE whitelist as the whitelist of its own. The valid values are "true", "false". + If it is set to "true" the actual whitelist associate with this project, if any, will be ignored.' + ProjectSummary: + type: object + properties: + repo_count: + type: integer + description: The number of the repositories under this project. + chart_count: + type: integer + description: The total number of charts under this project. + project_admin_count: + type: integer + description: The total number of project admin members. + master_count: + type: integer + description: The total number of master members. + developer_count: + type: integer + description: The total number of developer members. + guest_count: + type: integer + description: The total number of guest members. + quota: + type: object + properties: + hard: + $ref: "#/definitions/ResourceList" + description: The hard limits of the quota + used: + $ref: "#/definitions/ResourceList" + description: The used status of the quota Manifest: type: object properties: @@ -4270,6 +4804,9 @@ definitions: auth_mode: type: string description: 'The auth mode of current system, such as "db_auth", "ldap_auth"' + count_per_project: + type: string + description: The default count quota for the new created projects. email_from: type: string description: The sender name for Email notification. @@ -4330,12 +4867,18 @@ definitions: project_creation_restriction: type: string description: This attribute restricts what users have the permission to create project. 
It can be "everyone" or "adminonly". + quota_per_project_enable: + type: boolean + description: This attribute indicates whether quota per project enabled in harbor read_only: type: boolean description: '''docker push'' is prohibited by Harbor if you set it to true. ' self_registration: type: boolean description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.' + storage_per_project: + type: string + description: The default storage quota for the new created projects. token_expiration: type: integer description: 'The expiration time of the token for internal Registry, in minutes.' @@ -4361,6 +4904,9 @@ definitions: auth_mode: $ref: '#/definitions/StringConfigItem' description: 'The auth mode of current system, such as "db_auth", "ldap_auth"' + count_per_project: + $ref: '#/definitions/IntegerConfigItem' + description: The default count quota for the new created projects. email_from: $ref: '#/definitions/StringConfigItem' description: The sender name for Email notification. @@ -4421,12 +4967,18 @@ definitions: project_creation_restriction: $ref: '#/definitions/StringConfigItem' description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly". + quota_per_project_enable: + $ref: '#/definitions/BoolConfigItem' + description: This attribute indicates whether quota per project enabled in harbor read_only: $ref: '#/definitions/BoolConfigItem' description: '''docker push'' is prohibited by Harbor if you set it to true. ' self_registration: $ref: '#/definitions/BoolConfigItem' description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.' + storage_per_project: + $ref: '#/definitions/IntegerConfigItem' + description: The default storage quota for the new created projects. 
token_expiration: $ref: '#/definitions/IntegerConfigItem' description: 'The expiration time of the token for internal Registry, in minutes.' @@ -4542,7 +5094,7 @@ definitions: description: The name of the user group group_type: type: integer - description: 'The group type, 1 for LDAP group.' + description: 'The group type, 1 for LDAP group, 2 for HTTP group.' ldap_group_dn: type: string description: The DN of the LDAP group if group type is 1 (LDAP group). @@ -4829,7 +5381,9 @@ definitions: properties: type: type: string - description: The schedule type. The valid values are hourly, daily, weekly, custom and None. 'None' means to cancel the schedule. + description: | + The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manually' and 'None'. + 'Manually' means to trigger it right away and 'None' means to cancel the schedule. cron: type: string description: A cron expression, a time-based job scheduler. @@ -5065,4 +5619,162 @@ definitions: description: The name of namespace metadata: type: object - description: The metadata of namespace \ No newline at end of file + description: The metadata of namespace + CVEWhitelist: + type: object + description: The CVE Whitelist for system or project + properties: + id: + type: integer + description: ID of the whitelist + project_id: + type: integer + description: ID of the project which the whitelist belongs to. For system level whitelist this attribute is zero. + expires_at: + type: integer + description: the time for expiration of the whitelist, in the form of seconds since epoch. This is an optional attribute, if it's not set the CVE whitelist does not expire. 
+ items: + type: array + items: + $ref: "#/definitions/CVEWhitelistItem" + CVEWhitelistItem: + type: object + description: The item in CVE whitelist + properties: + cve_id: + type: string + description: The ID of the CVE, such as "CVE-2019-10164" + ResourceList: + type: object + additionalProperties: + type: integer + QuotaUpdateReq: + type: object + properties: + hard: + $ref: "#/definitions/ResourceList" + description: The new hard limits for the quota + QuotaRefObject: + type: object + additionalProperties: {} + Quota: + type: object + description: The quota object + properties: + id: + type: integer + description: ID of the quota + ref: + $ref: "#/definitions/QuotaRefObject" + description: The reference object of the quota + hard: + $ref: "#/definitions/ResourceList" + description: The hard limits of the quota + used: + $ref: "#/definitions/ResourceList" + description: The used status of the quota + creation_time: + type: string + description: the creation time of the quota + update_time: + type: string + description: the update time of the quota + WebhookTargetObject: + type: object + description: The webhook policy target object. + properties: + type: + type: string + description: The webhook target notify type. + address: + type: string + description: The webhook target address. + auth_header: + type: string + description: The webhook auth header. + skip_cert_verify: + type: boolean + description: Whether or not to skip cert verify. + WebhookPolicy: + type: object + description: The webhook policy object + properties: + id: + type: integer + format: int64 + description: The webhook policy ID. + name: + type: string + description: The name of webhook policy. + description: + type: string + description: The description of webhook policy. + project_id: + type: integer + description: The project ID of webhook policy. 
+ targets: + type: array + items: + $ref: '#/definitions/WebhookTargetObject' + event_types: + type: array + items: + type: string + creator: + type: string + description: The creator of the webhook policy. + creation_time: + type: string + description: The create time of the webhook policy. + update_time: + type: string + description: The update time of the webhook policy. + enabled: + type: boolean + description: Whether the webhook policy is enabled or not. + WebhookLastTrigger: + type: object + description: The webhook policy and last trigger time group by event type. + properties: + event_type: + type: string + description: The webhook event type. + enabled: + type: boolean + description: Whether or not the webhook policy enabled. + creation_time: + type: string + description: The creation time of webhook policy. + last_trigger_time: + type: string + description: The last trigger time of webhook policy. + WebhookJob: + type: object + description: The webhook job. + properties: + id: + type: integer + format: int64 + description: The webhook job ID. + policy_id: + type: integer + format: int64 + description: The webhook policy ID. + event_type: + type: string + description: The webhook job event type. + notify_type: + type: string + description: The webhook job notify type. + status: + type: string + description: The webhook job status. + job_detail: + type: string + description: The webhook job notify detailed data. + creation_time: + type: string + description: The webhook job creation time. + update_time: + type: string + description: The webhook job update time. diff --git a/docs/use_make.md b/docs/use_make.md index c18da1ad6..2d56f6eaa 100644 --- a/docs/use_make.md +++ b/docs/use_make.md @@ -36,10 +36,10 @@ version | set harbor version #### EXAMPLE: #### Build and run harbor from source code. 
-make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true +make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true ### Package offline installer -make package_offline GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true +make package_offline GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true ### Start harbor with notary make -e NOTARYFLAG=true start diff --git a/docs/user_guide.md b/docs/user_guide.md index a3d3cb889..840e81ce2 100644 --- a/docs/user_guide.md +++ b/docs/user_guide.md @@ -573,7 +573,7 @@ Before working, Harbor should be added into the repository list with `helm repo With this mode Helm can be made aware of all the charts located in different projects and which are accessible by the currently authenticated user. ``` -helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo +helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo ``` **NOTES:** Providing both ca file and cert files is caused by an issue from helm. @@ -581,7 +581,7 @@ helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --us With this mode, helm can only pull charts in the specified project. 
``` -helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject +helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject ``` #### Push charts to the repository server by CLI @@ -591,7 +591,7 @@ helm plugin install https://github.com/chartmuseum/helm-push ``` After a successful installation, run `push` command to upload your charts: ``` -helm push --ca-file=ca.crt --key-file=server.key --cert-file=server.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo +helm push --ca-file=ca.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo ``` **NOTES:** `push` command does not support pushing a prov file of a signed chart yet. @@ -609,7 +609,7 @@ helm search hello ``` Everything is ready, install the chart to your kubernetes: ``` -helm install --ca-file=ca.crt --key-file=server.key --cert-file=server.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm +helm install --ca-file=ca.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm ``` For other more helm commands like how to sign a chart, please refer to the [helm doc](https://docs.helm.sh/helm/#helm). diff --git a/make/harbor.yml b/make/harbor.yml index d1d708a53..ba860ffca 100644 --- a/make/harbor.yml +++ b/make/harbor.yml @@ -30,6 +30,11 @@ harbor_admin_password: Harbor12345 database: # The password for the root user of Harbor DB. Change this before any production use. password: root123 + # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained. + max_idle_conns: 50 + # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections. + # Note: the default number of connections is 100 for postgres. 
+ max_open_conns: 100 # The default data volume data_volume: /data @@ -50,20 +55,18 @@ data_volume: /data # disabled: false # Clair configuration -clair: +clair: # The interval of clair updaters, the unit is hour, set to 0 to disable the updaters. updaters_interval: 12 - # Config http proxy for Clair, e.g. http://my.proxy.com:3128 - # Clair doesn't need to connect to harbor internal components via http proxy. - http_proxy: - https_proxy: - no_proxy: 127.0.0.1,localhost,core,registry - jobservice: - # Maximum number of job workers in job service + # Maximum number of job workers in job service max_job_workers: 10 +notification: + # Maximum retry count for webhook job + webhook_job_max_retry: 10 + chart: # Change the value of absolute_url to enabled can enable absolute url in chart absolute_url: disabled @@ -72,14 +75,25 @@ chart: log: # options are debug, info, warning, error, fatal level: info - # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated. - rotate_count: 50 - # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes. - # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G - # are all valid. - rotate_size: 200M - # The directory on your host that store log - location: /var/log/harbor + # configs for logs in local storage + local: + # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated. + rotate_count: 50 + # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes. + # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G + # are all valid. 
+ rotate_size: 200M + # The directory on your host that store log + location: /var/log/harbor + + # Uncomment following lines to enable external syslog endpoint. + # external_endpoint: + # # protocol used to transmit log to external endpoint, options is tcp or udp + # protocol: tcp + # # The host of external endpoint + # host: localhost + # # Port of external endpoint + # port: 5140 #This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY! _version: 1.8.0 @@ -128,3 +142,20 @@ _version: 1.8.0 # Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert. # uaa: # ca_file: /path/to/ca + +# Global proxy +# Config http proxy for components, e.g. http://my.proxy.com:3128 +# Components doesn't need to connect to each others via http proxy. +# Remove component from `components` array if want disable proxy +# for it. If you want use proxy for replication, MUST enable proxy +# for core and jobservice, and set `http_proxy` and `https_proxy`. +# Add domain to the `no_proxy` field, when you want disable proxy +# for some special registry. 
+proxy: + http_proxy: + https_proxy: + no_proxy: 127.0.0.1,localhost,.local,.internal,log,db,redis,nginx,core,portal,postgresql,jobservice,registry,registryctl,clair + components: + - core + - jobservice + - clair diff --git a/make/migrations/postgresql/0001_initial_schema.up.sql b/make/migrations/postgresql/0001_initial_schema.up.sql index bccd7f4cb..e3f2bb903 100644 --- a/make/migrations/postgresql/0001_initial_schema.up.sql +++ b/make/migrations/postgresql/0001_initial_schema.up.sql @@ -56,9 +56,9 @@ $$; CREATE TRIGGER harbor_user_update_time_at_modtime BEFORE UPDATE ON harbor_user FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column(); -insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values -('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()), -('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW()); +insert into harbor_user (username, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values +('admin', '', 'system admin', 'admin user',false, true, NOW(), NOW()), +('anonymous', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW()); create table project ( project_id SERIAL PRIMARY KEY NOT NULL, diff --git a/make/migrations/postgresql/0005_1.8.2_schema.up.sql b/make/migrations/postgresql/0005_1.8.2_schema.up.sql new file mode 100644 index 000000000..45f8f722d --- /dev/null +++ b/make/migrations/postgresql/0005_1.8.2_schema.up.sql @@ -0,0 +1,30 @@ +/* +Rename the duplicate names before adding "UNIQUE" constraint +*/ +DO $$ +BEGIN + WHILE EXISTS (SELECT count(*) FROM user_group GROUP BY group_name HAVING count(*) > 1) LOOP + UPDATE user_group AS r + SET group_name = ( + /* + truncate the name if it is too long after appending the sequence number + */ + CASE WHEN (length(group_name)+length(v.seq::text)+1) > 256 + THEN + substring(group_name from 1 for 
(255-length(v.seq::text))) || '_' || v.seq + ELSE + group_name || '_' || v.seq + END + ) + FROM (SELECT id, row_number() OVER (PARTITION BY group_name ORDER BY id) AS seq FROM user_group) AS v + WHERE r.id = v.id AND v.seq > 1; + END LOOP; +END $$; + +ALTER TABLE user_group ADD CONSTRAINT unique_group_name UNIQUE (group_name); + + +/* +Fix issue https://github.com/goharbor/harbor/issues/8526, delete the none scan_all schedule. + */ +UPDATE admin_job SET deleted='true' WHERE cron_str='{"type":"none"}'; diff --git a/make/migrations/postgresql/0010_1.9.0_schema.up.sql b/make/migrations/postgresql/0010_1.9.0_schema.up.sql new file mode 100644 index 000000000..261b6d9a0 --- /dev/null +++ b/make/migrations/postgresql/0010_1.9.0_schema.up.sql @@ -0,0 +1,183 @@ +/* add table for CVE whitelist */ +CREATE TABLE cve_whitelist +( + id SERIAL PRIMARY KEY NOT NULL, + project_id int, + creation_time timestamp default CURRENT_TIMESTAMP, + update_time timestamp default CURRENT_TIMESTAMP, + expires_at bigint, + items text NOT NULL, + UNIQUE (project_id) +); + +CREATE TABLE blob +( + id SERIAL PRIMARY KEY NOT NULL, + /* + digest of config, layer, manifest + */ + digest varchar(255) NOT NULL, + content_type varchar(255) NOT NULL, + size int NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP, + UNIQUE (digest) +); + +/* add the table for project and blob */ +CREATE TABLE project_blob ( + id SERIAL PRIMARY KEY NOT NULL, + project_id int NOT NULL, + blob_id int NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP, + CONSTRAINT unique_project_blob UNIQUE (project_id, blob_id) +); + +CREATE TABLE artifact +( + id SERIAL PRIMARY KEY NOT NULL, + project_id int NOT NULL, + repo varchar(255) NOT NULL, + tag varchar(255) NOT NULL, + /* + digest of manifest + */ + digest varchar(255) NOT NULL, + /* + kind of artifact, image, chart, etc.. 
+ */ + kind varchar(255) NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP, + pull_time timestamp, + push_time timestamp, + CONSTRAINT unique_artifact UNIQUE (project_id, repo, tag) +); + +/* add the table for relation of artifact and blob */ +CREATE TABLE artifact_blob +( + id SERIAL PRIMARY KEY NOT NULL, + digest_af varchar(255) NOT NULL, + digest_blob varchar(255) NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP, + CONSTRAINT unique_artifact_blob UNIQUE (digest_af, digest_blob) +); + +/* add quota table */ +CREATE TABLE quota +( + id SERIAL PRIMARY KEY NOT NULL, + reference VARCHAR(255) NOT NULL, + reference_id VARCHAR(255) NOT NULL, + hard JSONB NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP, + update_time timestamp default CURRENT_TIMESTAMP, + UNIQUE (reference, reference_id) +); + +/* add quota usage table */ +CREATE TABLE quota_usage +( + id SERIAL PRIMARY KEY NOT NULL, + reference VARCHAR(255) NOT NULL, + reference_id VARCHAR(255) NOT NULL, + used JSONB NOT NULL, + creation_time timestamp default CURRENT_TIMESTAMP, + update_time timestamp default CURRENT_TIMESTAMP, + UNIQUE (reference, reference_id) +); + +/* only set quota and usage for 'library', and let the sync quota handling others. 
*/ +INSERT INTO quota (reference, reference_id, hard, creation_time, update_time) +SELECT 'project', + CAST(project_id AS VARCHAR), + '{"count": -1, "storage": -1}', + NOW(), + NOW() +FROM project +WHERE name = 'library' and deleted = 'f'; + +INSERT INTO quota_usage (id, reference, reference_id, used, creation_time, update_time) +SELECT id, + reference, + reference_id, + '{"count": 0, "storage": 0}', + creation_time, + update_time +FROM quota; + +create table retention_policy +( + id serial PRIMARY KEY NOT NULL, + scope_level varchar(20), + scope_reference integer, + trigger_kind varchar(20), + data text, + create_time time, + update_time time +); + +create table retention_execution +( + id serial PRIMARY KEY NOT NULL, + policy_id integer, + dry_run boolean, + trigger varchar(20), + start_time timestamp +); + +create table retention_task +( + id SERIAL NOT NULL, + execution_id integer, + repository varchar(255), + job_id varchar(64), + status varchar(32), + status_code integer, + start_time timestamp default CURRENT_TIMESTAMP, + end_time timestamp default CURRENT_TIMESTAMP, + total integer, + retained integer, + PRIMARY KEY (id) +); + +create table schedule +( + id SERIAL NOT NULL, + job_id varchar(64), + status varchar(64), + creation_time timestamp default CURRENT_TIMESTAMP, + update_time timestamp default CURRENT_TIMESTAMP, + PRIMARY KEY (id) +); + +/*add notification policy table*/ +create table notification_policy ( + id SERIAL NOT NULL, + name varchar(256), + project_id int NOT NULL, + enabled boolean NOT NULL DEFAULT true, + description text, + targets text, + event_types text, + creator varchar(256), + creation_time timestamp default CURRENT_TIMESTAMP, + update_time timestamp default CURRENT_TIMESTAMP, + PRIMARY KEY (id), + CONSTRAINT unique_project_id UNIQUE (project_id) + ); + +/*add notification job table*/ + CREATE TABLE notification_job ( + id SERIAL NOT NULL, + policy_id int NOT NULL, + status varchar(32), + /* event_type is the type of trigger event, 
eg. pushImage, pullImage, uploadChart... */ + event_type varchar(256), + /* notify_type is the type to notify event to user, eg. HTTP, Email... */ + notify_type varchar(256), + job_detail text, + job_uuid varchar(64), + creation_time timestamp default CURRENT_TIMESTAMP, + update_time timestamp default CURRENT_TIMESTAMP, + PRIMARY KEY (id) + ); diff --git a/make/photon/chartserver/builder b/make/photon/chartserver/builder index c1fb5f09a..a1d6c3c3f 100755 --- a/make/photon/chartserver/builder +++ b/make/photon/chartserver/builder @@ -4,7 +4,7 @@ set +e usage(){ echo "Usage: builder " - echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.8.1 cmd/chartmuseum chartm" + echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.9.0 cmd/chartmuseum chartm" exit 1 } @@ -13,7 +13,7 @@ if [ $# != 5 ]; then fi GOLANG_IMAGE="$1" -CODE_PATH="$2" +GIT_PATH="$2" CODE_VERSION="$3" MAIN_GO_PATH="$4" BIN_NAME="$5" @@ -27,7 +27,7 @@ mkdir -p binary rm -rf binary/$BIN_NAME || true cp compile.sh binary/ -docker run -it -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $CODE_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME +docker run -it --rm -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $GIT_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME #Clear docker rm -f golang_code_builder diff --git a/make/photon/chartserver/compile.sh b/make/photon/chartserver/compile.sh index dca0d6c1d..4634c6d15 100644 --- a/make/photon/chartserver/compile.sh +++ b/make/photon/chartserver/compile.sh @@ -11,24 +11,21 @@ if [ $# != 4 ]; then usage fi -CODE_PATH="$1" +GIT_PATH="$1" VERSION="$2" MAIN_GO_PATH="$3" BIN_NAME="$4" -#Get the source code of chartmusem -go get $CODE_PATH - +#Get the source code +git clone $GIT_PATH src_code +ls +SRC_PATH=$(pwd)/src_code set -e #Checkout the released tag branch -cd /go/src/$CODE_PATH -git checkout tags/$VERSION -b $VERSION - -#Install the go dep tool to restore the package 
dependencies -curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh -dep ensure +cd $SRC_PATH +git checkout tags/$VERSION -b $VERSION #Compile -cd /go/src/$CODE_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME +cd $SRC_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME mv $BIN_NAME /go/bin/ diff --git a/make/photon/core/Dockerfile b/make/photon/core/Dockerfile index 39b7cf574..7eaa4191c 100644 --- a/make/photon/core/Dockerfile +++ b/make/photon/core/Dockerfile @@ -6,11 +6,11 @@ RUN tdnf install sudo -y >> /dev/null\ && mkdir /harbor/ HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/ping || exit 1 -COPY ./make/photon/core/harbor_core ./make/photon/core/start.sh ./UIVERSION /harbor/ +COPY ./make/photon/core/harbor_core ./UIVERSION /harbor/ COPY ./src/core/views /harbor/views COPY ./make/migrations /harbor/migrations -RUN chmod u+x /harbor/start.sh /harbor/harbor_core +RUN chmod u+x /harbor/harbor_core WORKDIR /harbor/ - -ENTRYPOINT ["/harbor/start.sh"] +USER harbor +ENTRYPOINT ["/harbor/harbor_core"] diff --git a/make/photon/core/start.sh b/make/photon/core/start.sh deleted file mode 100644 index 20267e671..000000000 --- a/make/photon/core/start.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -sudo -E -u \#10000 "/harbor/harbor_core" - diff --git a/make/photon/db/Dockerfile b/make/photon/db/Dockerfile index 5672b2f25..e9d765393 100644 --- a/make/photon/db/Dockerfile +++ b/make/photon/db/Dockerfile @@ -18,15 +18,16 @@ RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools VOLUME /var/lib/postgresql/data -ADD ./make/photon/db/docker-entrypoint.sh /entrypoint.sh -ADD ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh -RUN chmod u+x /entrypoint.sh /docker-healthcheck.sh -ENTRYPOINT ["/entrypoint.sh"] -HEALTHCHECK CMD ["/docker-healthcheck.sh"] - +COPY ./make/photon/db/docker-entrypoint.sh /docker-entrypoint.sh +COPY ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh COPY ./make/photon/db/initial-notaryserver.sql 
/docker-entrypoint-initdb.d/ COPY ./make/photon/db/initial-notarysigner.sql /docker-entrypoint-initdb.d/ COPY ./make/photon/db/initial-registry.sql /docker-entrypoint-initdb.d/ +RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \ + && chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh + +ENTRYPOINT ["/docker-entrypoint.sh"] +HEALTHCHECK CMD ["/docker-healthcheck.sh"] EXPOSE 5432 -CMD ["postgres"] +USER postgres diff --git a/make/photon/db/docker-entrypoint.sh b/make/photon/db/docker-entrypoint.sh index c8f667282..abfabe4ec 100644 --- a/make/photon/db/docker-entrypoint.sh +++ b/make/photon/db/docker-entrypoint.sh @@ -23,95 +23,88 @@ file_env() { unset "$fileVar" } -if [ "${1:0:1}" = '-' ]; then - set -- postgres "$@" -fi - -if [ "$1" = 'postgres' ]; then - chown -R postgres:postgres $PGDATA - # look specifically for PG_VERSION, as it is expected in the DB dir - if [ ! -s "$PGDATA/PG_VERSION" ]; then - file_env 'POSTGRES_INITDB_ARGS' - if [ "$POSTGRES_INITDB_XLOGDIR" ]; then - export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR" - fi - su - $1 -c "initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS" - # check password first so we can output the warning before postgres - # messes it up - file_env 'POSTGRES_PASSWORD' - if [ "$POSTGRES_PASSWORD" ]; then - pass="PASSWORD '$POSTGRES_PASSWORD'" - authMethod=md5 - else - # The - option suppresses leading tabs but *not* spaces. :) - cat >&2 <<-EOF - **************************************************** - WARNING: No password has been set for the database. - This will allow anyone with access to the - Postgres port to access your database. In - Docker's default configuration, this is - effectively any other container on the same - system. - Use "-e POSTGRES_PASSWORD=password" to set - it in "docker run". 
- **************************************************** +# look specifically for PG_VERSION, as it is expected in the DB dir +if [ ! -s "$PGDATA/PG_VERSION" ]; then + file_env 'POSTGRES_INITDB_ARGS' + if [ "$POSTGRES_INITDB_XLOGDIR" ]; then + export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR" + fi + initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS + # check password first so we can output the warning before postgres + # messes it up + file_env 'POSTGRES_PASSWORD' + if [ "$POSTGRES_PASSWORD" ]; then + pass="PASSWORD '$POSTGRES_PASSWORD'" + authMethod=md5 + else + # The - option suppresses leading tabs but *not* spaces. :) + cat >&2 <<-EOF + **************************************************** + WARNING: No password has been set for the database. + This will allow anyone with access to the + Postgres port to access your database. In + Docker's default configuration, this is + effectively any other container on the same + system. + Use "-e POSTGRES_PASSWORD=password" to set + it in "docker run". 
+ **************************************************** EOF - pass= - authMethod=trust - fi + pass= + authMethod=trust + fi - { - echo - echo "host all all all $authMethod" - } >> "$PGDATA/pg_hba.conf" - su postgres - echo `whoami` - # internal start of server in order to allow set-up using psql-client - # does not listen on external TCP/IP and waits until start finishes - su - $1 -c "pg_ctl -D \"$PGDATA\" -o \"-c listen_addresses='localhost'\" -w start" + { + echo + echo "host all all all $authMethod" + } >> "$PGDATA/pg_hba.conf" + echo `whoami` + # internal start of server in order to allow set-up using psql-client + # does not listen on external TCP/IP and waits until start finishes + pg_ctl -D "$PGDATA" -o "-c listen_addresses=''" -w start - file_env 'POSTGRES_USER' 'postgres' - file_env 'POSTGRES_DB' "$POSTGRES_USER" + file_env 'POSTGRES_USER' 'postgres' + file_env 'POSTGRES_DB' "$POSTGRES_USER" - psql=( psql -v ON_ERROR_STOP=1 ) + psql=( psql -v ON_ERROR_STOP=1 ) - if [ "$POSTGRES_DB" != 'postgres' ]; then - "${psql[@]}" --username postgres <<-EOSQL - CREATE DATABASE "$POSTGRES_DB" ; -EOSQL - echo - fi - - if [ "$POSTGRES_USER" = 'postgres' ]; then - op='ALTER' - else - op='CREATE' - fi + if [ "$POSTGRES_DB" != 'postgres' ]; then "${psql[@]}" --username postgres <<-EOSQL - $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ; + CREATE DATABASE "$POSTGRES_DB" ; EOSQL echo - - psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" ) - - echo - for f in /docker-entrypoint-initdb.d/*; do - case "$f" in - *.sh) echo "$0: running $f"; . "$f" ;; - *.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;; - *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;; - *) echo "$0: ignoring $f" ;; - esac - echo - done - - PGUSER="${PGUSER:-postgres}" \ - su - $1 -c "pg_ctl -D \"$PGDATA\" -m fast -w stop" - - echo - echo 'PostgreSQL init process complete; ready for start up.' 
- echo fi + + if [ "$POSTGRES_USER" = 'postgres' ]; then + op='ALTER' + else + op='CREATE' + fi + "${psql[@]}" --username postgres <<-EOSQL + $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ; +EOSQL + echo + + psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" ) + + echo + for f in /docker-entrypoint-initdb.d/*; do + case "$f" in + *.sh) echo "$0: running $f"; . "$f" ;; + *.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;; + *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;; + *) echo "$0: ignoring $f" ;; + esac + echo + done + + PGUSER="${PGUSER:-postgres}" \ + pg_ctl -D "$PGDATA" -m fast -w stop + + echo + echo 'PostgreSQL init process complete; ready for start up.' + echo fi -exec su - $1 -c "$@ -D $PGDATA" + +postgres -D $PGDATA diff --git a/make/photon/jobservice/Dockerfile b/make/photon/jobservice/Dockerfile index 3131550d2..eddb8e65b 100644 --- a/make/photon/jobservice/Dockerfile +++ b/make/photon/jobservice/Dockerfile @@ -1,12 +1,19 @@ FROM photon:2.0 -RUN mkdir /harbor/ \ - && tdnf install sudo -y >> /dev/null\ +RUN tdnf install sudo -y >> /dev/null\ && tdnf clean all \ - && groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor + && groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor -COPY ./make/photon/jobservice/start.sh ./make/photon/jobservice/harbor_jobservice /harbor/ +COPY ./make/photon/jobservice/harbor_jobservice /harbor/ + +RUN chmod u+x /harbor/harbor_jobservice -RUN chmod u+x /harbor/harbor_jobservice /harbor/start.sh WORKDIR /harbor/ -ENTRYPOINT ["/harbor/start.sh"] + +USER harbor + +VOLUME ["/var/log/jobs/"] + +HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/v1/stats || exit 1 + +ENTRYPOINT ["/harbor/harbor_jobservice", "-c", "/etc/jobservice/config.yml"] diff --git a/make/photon/jobservice/start.sh b/make/photon/jobservice/start.sh deleted file mode 100644 index 517971b16..000000000 --- a/make/photon/jobservice/start.sh +++ /dev/null @@ -1,6 
+0,0 @@ -#!/bin/sh -if [ -d /var/log/jobs ]; then - chown -R 10000:10000 /var/log/jobs/ -fi -sudo -E -u \#10000 "/harbor/harbor_jobservice" "-c" "/etc/jobservice/config.yml" - diff --git a/make/photon/log/rsyslog_docker.conf b/make/photon/log/rsyslog_docker.conf index a21cc5078..5264d85db 100644 --- a/make/photon/log/rsyslog_docker.conf +++ b/make/photon/log/rsyslog_docker.conf @@ -1,8 +1,5 @@ # Rsyslog configuration file for docker. - -template(name="DynaFile" type="string" - string="/var/log/docker/%syslogtag:R,ERE,0,DFLT:[^[]*--end:secpath-replace%.log" -) -#if $programname == "docker" then ?DynaFile -if $programname != "rsyslogd" then -?DynaFile - +template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log") +if $programname != "rsyslogd" then { + action(type="omfile" dynaFile="DynaFile") +} diff --git a/make/photon/nginx/Dockerfile b/make/photon/nginx/Dockerfile index 3d244ee58..902107205 100644 --- a/make/photon/nginx/Dockerfile +++ b/make/photon/nginx/Dockerfile @@ -1,14 +1,19 @@ FROM photon:2.0 -RUN tdnf install -y nginx >> /dev/null\ +RUN tdnf install sudo nginx -y >> /dev/null\ + && tdnf clean all \ + && groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \ && ln -sf /dev/stdout /var/log/nginx/access.log \ - && ln -sf /dev/stderr /var/log/nginx/error.log \ - && tdnf clean all + && ln -sf /dev/stderr /var/log/nginx/error.log -EXPOSE 80 VOLUME /var/cache/nginx /var/log/nginx /run + +EXPOSE 8080 + STOPSIGNAL SIGQUIT -HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1 +HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1 + +USER nginx CMD ["nginx", "-g", "daemon off;"] diff --git a/make/photon/notary/server-start.sh b/make/photon/notary/server-start.sh deleted file mode 100644 index 0e38be19e..000000000 --- a/make/photon/notary/server-start.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-server 
-config=/etc/notary/server-config.postgres.json -logf=logfmt" diff --git a/make/photon/notary/server.Dockerfile b/make/photon/notary/server.Dockerfile index 5d60d17f4..4b0172439 100644 --- a/make/photon/notary/server.Dockerfile +++ b/make/photon/notary/server.Dockerfile @@ -4,12 +4,12 @@ RUN tdnf install -y shadow sudo \ && tdnf clean all \ && groupadd -r -g 10000 notary \ && useradd --no-log-init -r -g 10000 -u 10000 notary - COPY ./make/photon/notary/migrate-patch /bin/migrate-patch COPY ./make/photon/notary/binary/notary-server /bin/notary-server COPY ./make/photon/notary/binary/migrate /bin/migrate COPY ./make/photon/notary/binary/migrations/ /migrations/ -COPY ./make/photon/notary/server-start.sh /bin/server-start.sh -RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/server-start.sh + +RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch ENV SERVICE_NAME=notary_server -ENTRYPOINT [ "/bin/server-start.sh" ] +USER notary +CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt \ No newline at end of file diff --git a/make/photon/notary/signer-start.sh b/make/photon/notary/signer-start.sh deleted file mode 100644 index 05fc15118..000000000 --- a/make/photon/notary/signer-start.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt" diff --git a/make/photon/notary/signer.Dockerfile b/make/photon/notary/signer.Dockerfile index b27bd3cd5..95e98bfd8 100644 --- a/make/photon/notary/signer.Dockerfile +++ b/make/photon/notary/signer.Dockerfile @@ -8,8 +8,8 @@ COPY ./make/photon/notary/migrate-patch /bin/migrate-patch COPY ./make/photon/notary/binary/notary-signer /bin/notary-signer COPY ./make/photon/notary/binary/migrate /bin/migrate COPY 
./make/photon/notary/binary/migrations/ /migrations/ -COPY ./make/photon/notary/signer-start.sh /bin/signer-start.sh -RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/signer-start.sh +RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch ENV SERVICE_NAME=notary_signer -ENTRYPOINT [ "/bin/signer-start.sh" ] +USER notary +CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt \ No newline at end of file diff --git a/make/photon/portal/Dockerfile b/make/photon/portal/Dockerfile index 6201519da..9f71410f7 100644 --- a/make/photon/portal/Dockerfile +++ b/make/photon/portal/Dockerfile @@ -1,39 +1,44 @@ FROM node:10.15.0 as nodeportal -RUN mkdir -p /portal_src -RUN mkdir -p /build_dir - -COPY make/photon/portal/entrypoint.sh / COPY src/portal /portal_src COPY ./docs/swagger.yaml /portal_src +COPY ./LICENSE /portal_src -WORKDIR /portal_src +WORKDIR /build_dir -RUN npm install && \ - chmod u+x /entrypoint.sh -RUN /entrypoint.sh -VOLUME ["/portal_src"] +RUN cp -r /portal_src/* /build_dir \ + && ls -la \ + && apt-get update \ + && apt-get install -y --no-install-recommends python-yaml=3.12-1 \ + && python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' < swagger.yaml > swagger.json \ + && npm install \ + && npm run build_lib \ + && npm run link_lib \ + && npm run release FROM photon:2.0 -RUN tdnf install -y nginx >> /dev/null \ - && ln -sf /dev/stdout /var/log/nginx/access.log \ - && ln -sf /dev/stderr /var/log/nginx/error.log \ - && tdnf clean all - -EXPOSE 80 -VOLUME /var/cache/nginx /var/log/nginx /run - - COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html +COPY --from=nodeportal /build_dir/LICENSE /usr/share/nginx/html COPY 
make/photon/portal/nginx.conf /etc/nginx/nginx.conf +RUN tdnf install -y nginx sudo >> /dev/null \ + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log \ + && groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \ + && chown -R nginx:nginx /etc/nginx \ + && tdnf clean all + +EXPOSE 8080 +VOLUME /var/cache/nginx /var/log/nginx /run + STOPSIGNAL SIGQUIT -HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1 - +HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1 +USER nginx CMD ["nginx", "-g", "daemon off;"] + diff --git a/make/photon/portal/entrypoint.sh b/make/photon/portal/entrypoint.sh deleted file mode 100644 index c00b5e0dc..000000000 --- a/make/photon/portal/entrypoint.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -set -e - -cd /build_dir -cp -r /portal_src/* . -ls -la - -# Update -apt-get update -apt-get install -y ruby -ruby -ryaml -rjson -e 'puts JSON.pretty_generate(YAML.load(ARGF))' swagger.yaml>swagger.json - -cat ./package.json -npm install - -## Build harbor-portal and link it -npm run build_lib -npm run link_lib - -## Build production -npm run release diff --git a/make/photon/portal/nginx.conf b/make/photon/portal/nginx.conf index b9b631df7..96da5243f 100644 --- a/make/photon/portal/nginx.conf +++ b/make/photon/portal/nginx.conf @@ -1,13 +1,21 @@ -worker_processes 1; +worker_processes auto; +pid /tmp/nginx.pid; events { worker_connections 1024; } http { + + client_body_temp_path /tmp/client_body_temp; + proxy_temp_path /tmp/proxy_temp; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + server { - listen 80; + listen 8080; server_name localhost; root /usr/share/nginx/html; diff --git a/make/photon/prepare/g.py b/make/photon/prepare/g.py index bb766f07b..229f61a54 100644 --- a/make/photon/prepare/g.py +++ b/make/photon/prepare/g.py @@ -5,11 +5,19 @@ from pathlib import Path DEFAULT_UID = 10000 DEFAULT_GID = 
10000 +PG_UID = 999 +PG_GID = 999 + +REDIS_UID = 999 +REDIS_GID = 999 + ## Global variable +host_root_dir = '/hostfs' + base_dir = '/harbor_make' templates_dir = "/usr/src/app/templates" config_dir = '/config' - +data_dir = '/data' secret_dir = '/secret' secret_key_dir='/secret/keys' diff --git a/make/photon/prepare/main.py b/make/photon/prepare/main.py index 604d2735c..e617baebc 100644 --- a/make/photon/prepare/main.py +++ b/make/photon/prepare/main.py @@ -16,6 +16,7 @@ from utils.clair import prepare_clair from utils.chart import prepare_chartmuseum from utils.docker_compose import prepare_docker_compose from utils.nginx import prepare_nginx, nginx_confd_dir +from utils.redis import prepare_redis from g import (config_dir, input_config_path, private_key_pem_path, root_crt_path, secret_key_dir, old_private_key_pem_path, old_crt_path) @@ -38,6 +39,7 @@ def main(conf, with_notary, with_clair, with_chartmuseum): prepare_registry_ctl(config_dict) prepare_db(config_dict) prepare_job_service(config_dict) + prepare_redis(config_dict) get_secret_key(secret_key_dir) diff --git a/make/photon/prepare/templates/clair/clair_env.jinja b/make/photon/prepare/templates/clair/clair_env.jinja index 038f1a130..3825ca8fb 100644 --- a/make/photon/prepare/templates/clair/clair_env.jinja +++ b/make/photon/prepare/templates/clair/clair_env.jinja @@ -1,3 +1,3 @@ -http_proxy={{clair_http_proxy}} -https_proxy={{clair_https_proxy}} -no_proxy={{clair_no_proxy}} +HTTP_PROXY={{clair_http_proxy}} +HTTPS_PROXY={{clair_https_proxy}} +NO_PROXY={{clair_no_proxy}} diff --git a/make/photon/prepare/templates/clair/config.yaml.jinja b/make/photon/prepare/templates/clair/config.yaml.jinja index 00062b917..210df726c 100644 --- a/make/photon/prepare/templates/clair/config.yaml.jinja +++ b/make/photon/prepare/templates/clair/config.yaml.jinja @@ -17,9 +17,3 @@ clair: timeout: 300s updater: interval: {{clair_updaters_interval}}h - - notifier: - attempts: 3 - renotifyinterval: 2h - http: - endpoint: 
http://core:8080/service/notifications/clair diff --git a/make/photon/prepare/templates/core/env.jinja b/make/photon/prepare/templates/core/env.jinja index 5e2ae21bb..d6413678e 100644 --- a/make/photon/prepare/templates/core/env.jinja +++ b/make/photon/prepare/templates/core/env.jinja @@ -15,6 +15,8 @@ POSTGRESQL_USERNAME={{harbor_db_username}} POSTGRESQL_PASSWORD={{harbor_db_password}} POSTGRESQL_DATABASE={{harbor_db_name}} POSTGRESQL_SSLMODE={{harbor_db_sslmode}} +POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}} +POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} REGISTRY_URL={{registry_url}} TOKEN_SERVICE_URL={{token_service_url}} HARBOR_ADMIN_PASSWORD={{harbor_admin_password}} @@ -31,6 +33,7 @@ CLAIR_DB_USERNAME={{clair_db_username}} CLAIR_DB={{clair_db_name}} CLAIR_DB_SSLMODE={{clair_db_sslmode}} CORE_URL={{core_url}} +CORE_LOCAL_URL={{core_local_url}} JOBSERVICE_URL={{jobservice_url}} CLAIR_URL={{clair_url}} NOTARY_URL={{notary_url}} @@ -40,3 +43,7 @@ RELOAD_KEY={{reload_key}} CHART_REPOSITORY_URL={{chart_repository_url}} REGISTRY_CONTROLLER_URL={{registry_controller_url}} WITH_CHARTMUSEUM={{with_chartmuseum}} + +HTTP_PROXY={{core_http_proxy}} +HTTPS_PROXY={{core_https_proxy}} +NO_PROXY={{core_no_proxy}} diff --git a/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja b/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja index 95b63099e..cb6785766 100644 --- a/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja +++ b/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja @@ -14,7 +14,8 @@ services: - SETUID volumes: - {{log_location}}/:/var/log/docker/:z - - ./common/config/log/:/etc/logrotate.d/:z + - ./common/config/log/logrotate.conf:/etc/logrotate.d/logrotate.conf:z + - ./common/config/log/rsyslog_docker.conf:/etc/rsyslog.d/rsyslog_docker.conf:z ports: - 127.0.0.1:1514:10514 networks: @@ -275,12 +276,7 @@ services: volumes: - ./common/config/nginx:/etc/nginx:z {% if protocol == 
'https' %} - - type: bind - source: {{cert_key_path}} - target: /etc/cert/server.key - - type: bind - source: {{cert_path}} - target: /etc/cert/server.crt + - {{data_volume}}/secret/cert:/etc/cert:z {% endif %} networks: - harbor @@ -289,9 +285,9 @@ services: {% endif %} dns_search: . ports: - - {{http_port}}:80 + - {{http_port}}:8080 {% if protocol == 'https' %} - - {{https_port}}:443 + - {{https_port}}:8443 {% endif %} {% if with_notary %} - 4443:4443 @@ -419,7 +415,7 @@ services: {% if gcs_keyfile %} - type: bind source: {{gcs_keyfile}} - target: /etc/registry/gcs.key + target: /etc/chartserver/gcs.key {% endif %} {%if registry_custom_ca_bundle_path %} - type: bind diff --git a/make/photon/prepare/templates/jobservice/env.jinja b/make/photon/prepare/templates/jobservice/env.jinja index 2f4923248..c38534f02 100644 --- a/make/photon/prepare/templates/jobservice/env.jinja +++ b/make/photon/prepare/templates/jobservice/env.jinja @@ -1,3 +1,8 @@ CORE_SECRET={{core_secret}} JOBSERVICE_SECRET={{jobservice_secret}} CORE_URL={{core_url}} +JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}} + +HTTP_PROXY={{jobservice_http_proxy}} +HTTPS_PROXY={{jobservice_https_proxy}} +NO_PROXY={{jobservice_no_proxy}} diff --git a/make/photon/prepare/templates/log/rsyslog_docker.conf.jinja b/make/photon/prepare/templates/log/rsyslog_docker.conf.jinja new file mode 100644 index 000000000..9071237fd --- /dev/null +++ b/make/photon/prepare/templates/log/rsyslog_docker.conf.jinja @@ -0,0 +1,11 @@ +# Rsyslog configuration file for docker. 
+ +template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log") + +if $programname != "rsyslogd" then { +{%if log_external %} + action(type="omfwd" Target="{{log_ep_host}}" Port="{{log_ep_port}}" Protocol="{{log_ep_protocol}}" Template="RSYSLOG_SyslogProtocol23Format") +{% else %} + action(type="omfile" dynaFile="DynaFile") +{% endif %} +} \ No newline at end of file diff --git a/make/photon/prepare/templates/nginx/nginx.http.conf.jinja b/make/photon/prepare/templates/nginx/nginx.http.conf.jinja index 0f7f5107e..09e1f4346 100644 --- a/make/photon/prepare/templates/nginx/nginx.http.conf.jinja +++ b/make/photon/prepare/templates/nginx/nginx.http.conf.jinja @@ -1,4 +1,5 @@ worker_processes auto; +pid /tmp/nginx.pid; events { worker_connections 1024; @@ -7,6 +8,11 @@ events { } http { + client_body_temp_path /tmp/client_body_temp; + proxy_temp_path /tmp/proxy_temp; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; tcp_nodelay on; # this is necessary for us to be able to disable request buffering in all cases @@ -17,7 +23,7 @@ http { } upstream portal { - server portal:80; + server portal:8080; } log_format timed_combined '$remote_addr - ' @@ -28,7 +34,7 @@ http { access_log /dev/stdout timed_combined; server { - listen 80; + listen 8080; server_tokens off; # disable any limits to avoid HTTP 413 for large image uploads client_max_body_size 0; @@ -117,7 +123,7 @@ http { proxy_request_buffering off; } - location /service/notifications { + location /service/notifications { return 404; } } diff --git a/make/photon/prepare/templates/nginx/nginx.https.conf.jinja b/make/photon/prepare/templates/nginx/nginx.https.conf.jinja index 1ae2a9754..e4ac93078 100644 --- a/make/photon/prepare/templates/nginx/nginx.https.conf.jinja +++ b/make/photon/prepare/templates/nginx/nginx.https.conf.jinja @@ -1,4 +1,5 @@ worker_processes auto; +pid /tmp/nginx.pid; events { worker_connections 1024; @@ -7,6 +8,11 @@ events 
{ } http { + client_body_temp_path /tmp/client_body_temp; + proxy_temp_path /tmp/proxy_temp; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; tcp_nodelay on; include /etc/nginx/conf.d/*.upstream.conf; @@ -18,7 +24,7 @@ http { } upstream portal { - server portal:80; + server portal:8080; } log_format timed_combined '$remote_addr - ' @@ -31,7 +37,7 @@ http { include /etc/nginx/conf.d/*.server.conf; server { - listen 443 ssl; + listen 8443 ssl; # server_name harbordomain.com; server_tokens off; # SSL @@ -136,13 +142,13 @@ http { proxy_buffering off; proxy_request_buffering off; } - - location /service/notifications { + + location /service/notifications { return 404; } } - server { - listen 80; + server { + listen 8080; #server_name harbordomain.com; return 308 https://$host$request_uri; } diff --git a/make/photon/prepare/utils/clair.py b/make/photon/prepare/utils/clair.py index 72db85038..8d8680249 100644 --- a/make/photon/prepare/utils/clair.py +++ b/make/photon/prepare/utils/clair.py @@ -2,12 +2,12 @@ import os, shutil from g import templates_dir, config_dir, DEFAULT_UID, DEFAULT_GID from .jinja import render_jinja -from .misc import prepare_config_dir +from .misc import prepare_dir clair_template_dir = os.path.join(templates_dir, "clair") def prepare_clair(config_dict): - clair_config_dir = prepare_config_dir(config_dir, "clair") + clair_config_dir = prepare_dir(config_dir, "clair") if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")): print("Copying offline data file for clair DB") diff --git a/make/photon/prepare/utils/configs.py b/make/photon/prepare/utils/configs.py index aaf2747db..df14a53de 100644 --- a/make/photon/prepare/utils/configs.py +++ b/make/photon/prepare/utils/configs.py @@ -13,6 +13,14 @@ def validate(conf, **kwargs): if not conf.get("cert_key_path"): raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set") + # log endpoint validate + if 
('log_ep_host' in conf) and not conf['log_ep_host']: + raise Exception('Error: must set log endpoint host to enable external host') + if ('log_ep_port' in conf) and not conf['log_ep_port']: + raise Exception('Error: must set log endpoint port to enable external host') + if ('log_ep_protocol' in conf) and (conf['log_ep_protocol'] not in ['udp', 'tcp']): + raise Exception("Protocol in external log endpoint must be one of 'udp' or 'tcp' ") + # Storage validate valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"] storage_provider_name = conf.get("storage_provider_name") @@ -59,6 +67,7 @@ def parse_yaml_config(config_file_path): 'registry_url': "http://registry:5000", 'registry_controller_url': "http://registryctl:8080", 'core_url': "http://core:8080", + 'core_local_url': "http://127.0.0.1:8080", 'token_service_url': "http://core:8080/service/token", 'jobservice_url': 'http://jobservice:8080', 'clair_url': 'http://clair:6060', @@ -103,6 +112,11 @@ def parse_yaml_config(config_file_path): config_dict['harbor_db_username'] = 'postgres' config_dict['harbor_db_password'] = db_configs.get("password") or '' config_dict['harbor_db_sslmode'] = 'disable' + + default_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns + default_max_open_conns = 0 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns + config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_max_idle_conns + config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_max_open_conns # clari db config_dict['clair_db_host'] = 'postgresql' config_dict['clair_db_port'] = 5432 @@ -162,13 +176,18 @@ def parse_yaml_config(config_file_path): if storage_config.get('redirect'): config_dict['storage_redirect_disabled'] = storage_config['redirect']['disabled'] + # Global proxy configs + proxy_config = configs.get('proxy') or {} + proxy_components = proxy_config.get('components') or [] + for proxy_component in 
proxy_components: + config_dict[proxy_component + '_http_proxy'] = proxy_config.get('http_proxy') or '' + config_dict[proxy_component + '_https_proxy'] = proxy_config.get('https_proxy') or '' + config_dict[proxy_component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry' + # Clair configs, optional clair_configs = configs.get("clair") or {} config_dict['clair_db'] = 'postgres' config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12 - config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or '' - config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or '' - config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry' # Chart configs chart_configs = configs.get("chart") or {} @@ -179,18 +198,34 @@ def parse_yaml_config(config_file_path): config_dict['max_job_workers'] = js_config["max_job_workers"] config_dict['jobservice_secret'] = generate_random_string(16) + # notification config + notification_config = configs.get('notification') or {} + config_dict['notification_webhook_job_max_retry'] = notification_config["webhook_job_max_retry"] # Log configs allowed_levels = ['debug', 'info', 'warning', 'error', 'fatal'] log_configs = configs.get('log') or {} - config_dict['log_location'] = log_configs["location"] - config_dict['log_rotate_count'] = log_configs["rotate_count"] - config_dict['log_rotate_size'] = log_configs["rotate_size"] + log_level = log_configs['level'] if log_level not in allowed_levels: raise Exception('log level must be one of debug, info, warning, error, fatal') config_dict['log_level'] = log_level.lower() + # parse local log related configs + local_logs = log_configs.get('local') or {} + if local_logs: + config_dict['log_location'] = local_logs.get('location') or '/var/log/harbor' + config_dict['log_rotate_count'] = local_logs.get('rotate_count') or 50 + config_dict['log_rotate_size'] = local_logs.get('rotate_size') or 
'200M' + + # parse external log endpoint related configs + if log_configs.get('external_endpoint'): + config_dict['log_external'] = True + config_dict['log_ep_protocol'] = log_configs['external_endpoint']['protocol'] + config_dict['log_ep_host'] = log_configs['external_endpoint']['host'] + config_dict['log_ep_port'] = log_configs['external_endpoint']['port'] + else: + config_dict['log_external'] = False # external DB, optional, if external_db enabled, it will cover the database config external_db_configs = configs.get('external_database') or {} @@ -202,7 +237,7 @@ def parse_yaml_config(config_file_path): config_dict['harbor_db_username'] = external_db_configs['harbor']['username'] config_dict['harbor_db_password'] = external_db_configs['harbor']['password'] config_dict['harbor_db_sslmode'] = external_db_configs['harbor']['ssl_mode'] - # clari db + # clair db config_dict['clair_db_host'] = external_db_configs['clair']['host'] config_dict['clair_db_port'] = external_db_configs['clair']['port'] config_dict['clair_db_name'] = external_db_configs['clair']['db_name'] @@ -261,4 +296,4 @@ def parse_yaml_config(config_file_path): # UAA configs config_dict['uaa'] = configs.get('uaa') or {} - return config_dict \ No newline at end of file + return config_dict diff --git a/make/photon/prepare/utils/core.py b/make/photon/prepare/utils/core.py index 9f062efb8..5da6fc6fa 100644 --- a/make/photon/prepare/utils/core.py +++ b/make/photon/prepare/utils/core.py @@ -1,7 +1,7 @@ import shutil, os from g import config_dir, templates_dir -from utils.misc import prepare_config_dir, generate_random_string +from utils.misc import prepare_dir, generate_random_string from utils.jinja import render_jinja core_config_dir = os.path.join(config_dir, "core", "certificates") @@ -33,7 +33,7 @@ def prepare_core(config_dict, with_notary, with_clair, with_chartmuseum): copy_core_config(core_conf_template_path, core_conf) def prepare_core_config_dir(): - prepare_config_dir(core_config_dir) + 
prepare_dir(core_config_dir) def copy_core_config(core_templates_path, core_config_path): shutil.copyfile(core_templates_path, core_config_path) diff --git a/make/photon/prepare/utils/db.py b/make/photon/prepare/utils/db.py index 53ef3d93e..30b7e050d 100644 --- a/make/photon/prepare/utils/db.py +++ b/make/photon/prepare/utils/db.py @@ -1,20 +1,18 @@ import os -from g import config_dir, templates_dir -from utils.misc import prepare_config_dir +from g import config_dir, templates_dir, data_dir, PG_UID, PG_GID +from utils.misc import prepare_dir from utils.jinja import render_jinja db_config_dir = os.path.join(config_dir, "db") db_env_template_path = os.path.join(templates_dir, "db", "env.jinja") db_conf_env = os.path.join(config_dir, "db", "env") +database_data_path = os.path.join(data_dir, 'database') def prepare_db(config_dict): - prepare_db_config_dir() - + prepare_dir(database_data_path, uid=PG_UID, gid=PG_GID) + prepare_dir(db_config_dir) render_jinja( db_env_template_path, db_conf_env, harbor_db_password=config_dict['harbor_db_password']) - -def prepare_db_config_dir(): - prepare_config_dir(db_config_dir) \ No newline at end of file diff --git a/make/photon/prepare/utils/docker_compose.py b/make/photon/prepare/utils/docker_compose.py index cf129c2a2..648d6b979 100644 --- a/make/photon/prepare/utils/docker_compose.py +++ b/make/photon/prepare/utils/docker_compose.py @@ -13,8 +13,8 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum): VERSION_TAG = versions.get('VERSION_TAG') or 'dev' REGISTRY_VERSION = versions.get('REGISTRY_VERSION') or 'v2.7.1' NOTARY_VERSION = versions.get('NOTARY_VERSION') or 'v0.6.1' - CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.7' - CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.8.1' + CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.9' + CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.9.0' rendering_variables = { 'version': VERSION_TAG, @@ -33,17 +33,25 @@ 
def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum): 'with_chartmuseum': with_chartmuseum } + # for gcs storage_config = configs.get('storage_provider_config') or {} if storage_config.get('keyfile') and configs['storage_provider_name'] == 'gcs': rendering_variables['gcs_keyfile'] = storage_config['keyfile'] + # for https if configs['protocol'] == 'https': rendering_variables['cert_key_path'] = configs['cert_key_path'] rendering_variables['cert_path'] = configs['cert_path'] rendering_variables['https_port'] = configs['https_port'] + # for uaa uaa_config = configs.get('uaa') or {} if uaa_config.get('ca_file'): rendering_variables['uaa_ca_file'] = uaa_config['ca_file'] + # for log + log_ep_host = configs.get('log_ep_host') + if log_ep_host: + rendering_variables['external_log_endpoint'] = True + render_jinja(docker_compose_template_path, docker_compose_yml_path, **rendering_variables) \ No newline at end of file diff --git a/make/photon/prepare/utils/jobservice.py b/make/photon/prepare/utils/jobservice.py index 08aac9441..1b10900ee 100644 --- a/make/photon/prepare/utils/jobservice.py +++ b/make/photon/prepare/utils/jobservice.py @@ -1,7 +1,7 @@ import os from g import config_dir, DEFAULT_GID, DEFAULT_UID, templates_dir -from utils.misc import prepare_config_dir +from utils.misc import prepare_dir from utils.jinja import render_jinja job_config_dir = os.path.join(config_dir, "jobservice") @@ -10,15 +10,14 @@ job_service_conf_env = os.path.join(config_dir, "jobservice", "env") job_service_conf_template_path = os.path.join(templates_dir, "jobservice", "config.yml.jinja") jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml") - def prepare_job_service(config_dict): - prepare_config_dir(job_config_dir) + prepare_dir(job_config_dir, uid=DEFAULT_UID, gid=DEFAULT_GID) log_level = config_dict['log_level'].upper() # Job log is stored in data dir job_log_dir = os.path.join('/data', "job_logs") - prepare_config_dir(job_log_dir) + 
prepare_dir(job_log_dir, uid=DEFAULT_UID, gid=DEFAULT_GID) # Render Jobservice env render_jinja( job_service_env_template_path, @@ -33,4 +32,4 @@ def prepare_job_service(config_dict): gid=DEFAULT_GID, max_job_workers=config_dict['max_job_workers'], redis_url=config_dict['redis_url_js'], - level=log_level) \ No newline at end of file + level=log_level) diff --git a/make/photon/prepare/utils/log.py b/make/photon/prepare/utils/log.py index d5fd52e20..a8a2a1d20 100644 --- a/make/photon/prepare/utils/log.py +++ b/make/photon/prepare/utils/log.py @@ -1,15 +1,21 @@ import os from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID -from utils.misc import prepare_config_dir +from utils.misc import prepare_dir from utils.jinja import render_jinja log_config_dir = os.path.join(config_dir, "log") + +# logrotate config file logrotate_template_path = os.path.join(templates_dir, "log", "logrotate.conf.jinja") log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf") +# syslog docker config file +log_syslog_docker_template_path = os.path.join(templates_dir, 'log', 'rsyslog_docker.conf.jinja') +log_syslog_docker_config = os.path.join(config_dir, 'log', 'rsyslog_docker.conf') + def prepare_log_configs(config_dict): - prepare_config_dir(log_config_dir) + prepare_dir(log_config_dir) # Render Log config render_jinja( @@ -17,4 +23,13 @@ def prepare_log_configs(config_dict): log_rotate_config, uid=DEFAULT_UID, gid=DEFAULT_GID, - **config_dict) \ No newline at end of file + **config_dict) + + # Render syslog docker config + render_jinja( + log_syslog_docker_template_path, + log_syslog_docker_config, + uid=DEFAULT_UID, + gid=DEFAULT_GID, + **config_dict + ) \ No newline at end of file diff --git a/make/photon/prepare/utils/misc.py b/make/photon/prepare/utils/misc.py index fe6bcc7f8..e7b62faff 100644 --- a/make/photon/prepare/utils/misc.py +++ b/make/photon/prepare/utils/misc.py @@ -3,7 +3,7 @@ import string import random from g import DEFAULT_UID, DEFAULT_GID - +from 
pathlib import Path # To meet security requirement # By default it will change file mode to 0600, and make the owner of the file to 10000:10000 @@ -84,6 +84,26 @@ def prepare_config_dir(root, *name): os.makedirs(absolute_path) return absolute_path +def prepare_dir(root: str, *args, **kwargs) -> str: + gid, uid = kwargs.get('gid'), kwargs.get('uid') + absolute_path = Path(os.path.join(root, *args)) + if absolute_path.is_file(): + raise Exception('Path exists and the type is regular file') + mode = kwargs.get('mode') or 0o755 + absolute_path.mkdir(mode, parents=True, exist_ok=True) + + # if uid or gid not None, then change the ownership of this dir + if not(gid is None and uid is None): + dir_uid, dir_gid = absolute_path.stat().st_uid, absolute_path.stat().st_gid + if uid is None: + uid = dir_uid + if gid is None: + gid = dir_gid + os.chown(absolute_path, uid, gid) + + return str(absolute_path) + + def delfile(src): if os.path.isfile(src): diff --git a/make/photon/prepare/utils/nginx.py b/make/photon/prepare/utils/nginx.py index a8706349a..0d1117448 100644 --- a/make/photon/prepare/utils/nginx.py +++ b/make/photon/prepare/utils/nginx.py @@ -2,11 +2,13 @@ import os, shutil from fnmatch import fnmatch from pathlib import Path -from g import config_dir, templates_dir -from utils.misc import prepare_config_dir, mark_file +from g import config_dir, templates_dir, host_root_dir, DEFAULT_GID, DEFAULT_UID, data_dir +from utils.misc import prepare_dir, mark_file from utils.jinja import render_jinja from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH +host_ngx_real_cert_dir = Path(os.path.join(data_dir, 'secret', 'cert')) + nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf") nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d") nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja") @@ -17,44 +19,76 @@ CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS = 'harbor.https.*.conf' CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP = 
'harbor.http.*.conf' def prepare_nginx(config_dict): - prepare_config_dir(nginx_confd_dir) + prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID) render_nginx_template(config_dict) + +def prepare_nginx_certs(cert_key_path, cert_path): + """ + Prepare the certs file with proper ownership + 1. Remove nginx cert files in secret dir + 2. Copy cert files on host filesystem to secret dir + 3. Change the permission to 644 and ownership to 10000:10000 + """ + host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/'))) + host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/'))) + + if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir(): + shutil.rmtree(host_ngx_real_cert_dir) + + os.makedirs(host_ngx_real_cert_dir, mode=0o755) + real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key') + real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt') + shutil.copy2(host_ngx_cert_key_path, real_key_path) + shutil.copy2(host_ngx_cert_path, real_crt_path) + + os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID) + mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID) + mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID) + + def render_nginx_template(config_dict): - if config_dict['protocol'] == "https": - render_jinja(nginx_https_conf_template, nginx_conf, + """ + 1. render nginx config file through protocol + 2. 
copy additional configs to cert.d dir + """ + if config_dict['protocol'] == 'https': + prepare_nginx_certs(config_dict['cert_key_path'], config_dict['cert_path']) + render_jinja( + nginx_https_conf_template, + nginx_conf, + uid=DEFAULT_UID, + gid=DEFAULT_GID, ssl_cert=SSL_CERT_PATH, ssl_cert_key=SSL_CERT_KEY_PATH) location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS - cert_dir = Path(os.path.join(config_dir, 'cert')) - ssl_key_path = Path(os.path.join(cert_dir, 'server.key')) - ssl_crt_path = Path(os.path.join(cert_dir, 'server.crt')) - cert_dir.mkdir(parents=True, exist_ok=True) - ssl_key_path.touch() - ssl_crt_path.touch() + else: render_jinja( nginx_http_conf_template, - nginx_conf) + nginx_conf, + uid=DEFAULT_UID, + gid=DEFAULT_GID) location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern) -def add_additional_location_config(src, dst): - """ - These conf files is used for user that wanna add additional customized locations to harbor proxy - :params src: source of the file - :params dst: destination file path - """ - if not os.path.isfile(src): - return - print("Copying nginx configuration file {src} to {dst}".format( - src=src, dst=dst)) - shutil.copy2(src, dst) - mark_file(dst, mode=0o644) def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern): if not os.path.exists(src_config_dir): return + + def add_additional_location_config(src, dst): + """ + These conf files is used for user that wanna add additional customized locations to harbor proxy + :params src: source of the file + :params dst: destination file path + """ + if not os.path.isfile(src): + return + print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst)) + shutil.copy2(src, dst) + mark_file(dst, mode=0o644) + map(lambda filename: add_additional_location_config( os.path.join(src_config_dir, filename), 
os.path.join(dst_config_dir, filename)), diff --git a/make/photon/prepare/utils/notary.py b/make/photon/prepare/utils/notary.py index 8d1d1175e..2e571a462 100644 --- a/make/photon/prepare/utils/notary.py +++ b/make/photon/prepare/utils/notary.py @@ -2,7 +2,7 @@ import os, shutil, pathlib from g import templates_dir, config_dir, root_crt_path, secret_key_dir,DEFAULT_UID, DEFAULT_GID from .cert import openssl_installed, create_cert, create_root_cert, get_alias from .jinja import render_jinja -from .misc import mark_file, prepare_config_dir +from .misc import mark_file, prepare_dir notary_template_dir = os.path.join(templates_dir, "notary") notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja") @@ -20,12 +20,12 @@ notary_server_env_path = os.path.join(notary_config_dir, "server_env") def prepare_env_notary(nginx_config_dir): - notary_config_dir = prepare_config_dir(config_dir, "notary") + notary_config_dir = prepare_dir(config_dir, "notary") old_signer_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.crt')) old_signer_key_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.key')) old_signer_ca_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer-ca.crt')) - notary_secret_dir = prepare_config_dir('/secret/notary') + notary_secret_dir = prepare_dir('/secret/notary') signer_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.crt')) signer_key_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.key')) signer_ca_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer-ca.crt')) @@ -72,9 +72,12 @@ def prepare_env_notary(nginx_config_dir): print("Copying nginx configuration file for notary") - shutil.copy2( + + render_jinja( os.path.join(templates_dir, "nginx", "notary.upstream.conf.jinja"), - os.path.join(nginx_config_dir, "notary.upstream.conf")) + os.path.join(nginx_config_dir, "notary.upstream.conf"), 
+ gid=DEFAULT_GID, + uid=DEFAULT_UID) mark_file(os.path.join(notary_secret_dir, "notary-signer.crt")) mark_file(os.path.join(notary_secret_dir, "notary-signer.key")) @@ -88,6 +91,8 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa render_jinja( notary_server_nginx_config_template, os.path.join(nginx_config_dir, "notary.server.conf"), + gid=DEFAULT_GID, + uid=DEFAULT_UID, ssl_cert=ssl_cert_path, ssl_cert_key=ssl_cert_key_path) diff --git a/make/photon/prepare/utils/redis.py b/make/photon/prepare/utils/redis.py new file mode 100644 index 000000000..751a2475a --- /dev/null +++ b/make/photon/prepare/utils/redis.py @@ -0,0 +1,9 @@ +import os + +from g import data_dir, REDIS_UID, REDIS_GID +from utils.misc import prepare_dir + +redis_data_path = os.path.join(data_dir, 'redis') + +def prepare_redis(config_dict): + prepare_dir(redis_data_path, uid=REDIS_UID, gid=REDIS_GID) diff --git a/make/photon/prepare/utils/registry.py b/make/photon/prepare/utils/registry.py index e07a91bdb..2a3512d9b 100644 --- a/make/photon/prepare/utils/registry.py +++ b/make/photon/prepare/utils/registry.py @@ -1,7 +1,7 @@ import os, copy from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID -from utils.misc import prepare_config_dir +from utils.misc import prepare_dir from utils.jinja import render_jinja @@ -9,9 +9,16 @@ registry_config_dir = os.path.join(config_dir, "registry") registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja") registry_conf = os.path.join(config_dir, "registry", "config.yml") +levels_map = { + 'debug': 'debug', + 'info': 'info', + 'warning': 'warn', + 'error': 'error', + 'fatal': 'fatal' +} def prepare_registry(config_dict): - prepare_config_dir(registry_config_dir) + prepare_dir(registry_config_dir) storage_provider_info = get_storage_provider_info( config_dict['storage_provider_name'], @@ -22,6 +29,7 @@ def prepare_registry(config_dict): registry_conf, uid=DEFAULT_UID, gid=DEFAULT_GID, + 
level=levels_map[config_dict['log_level']], storage_provider_info=storage_provider_info, **config_dict) diff --git a/make/photon/prepare/utils/registry_ctl.py b/make/photon/prepare/utils/registry_ctl.py index b3fc936f6..8ffe68104 100644 --- a/make/photon/prepare/utils/registry_ctl.py +++ b/make/photon/prepare/utils/registry_ctl.py @@ -1,7 +1,7 @@ import os, shutil from g import config_dir, templates_dir -from utils.misc import prepare_config_dir +from utils.misc import prepare_dir from utils.jinja import render_jinja registryctl_config_dir = os.path.join(config_dir, "registryctl") @@ -24,7 +24,7 @@ def prepare_registry_ctl(config_dict): copy_registry_ctl_conf(registryctl_config_template_path, registryctl_conf) def prepare_registry_ctl_config_dir(): - prepare_config_dir(registryctl_config_dir) + prepare_dir(registryctl_config_dir) def copy_registry_ctl_conf(src, dst): shutil.copyfile(src, dst) \ No newline at end of file diff --git a/make/photon/redis/Dockerfile b/make/photon/redis/Dockerfile index efac15505..a90873b4b 100644 --- a/make/photon/redis/Dockerfile +++ b/make/photon/redis/Dockerfile @@ -4,11 +4,12 @@ RUN tdnf install -y redis sudo VOLUME /var/lib/redis WORKDIR /var/lib/redis -COPY ./make/photon/redis/docker-entrypoint.sh /usr/bin/ +COPY ./make/photon/redis/docker-healthcheck /usr/bin/ COPY ./make/photon/redis/redis.conf /etc/redis.conf -RUN chmod +x /usr/bin/docker-entrypoint.sh \ +RUN chmod +x /usr/bin/docker-healthcheck \ && chown redis:redis /etc/redis.conf -ENTRYPOINT ["docker-entrypoint.sh"] +HEALTHCHECK CMD ["docker-healthcheck"] +USER redis EXPOSE 6379 CMD ["redis-server", "/etc/redis.conf"] diff --git a/make/photon/redis/docker-entrypoint.sh b/make/photon/redis/docker-entrypoint.sh deleted file mode 100644 index 5f19ac33d..000000000 --- a/make/photon/redis/docker-entrypoint.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -set -e - -if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then - set -- redis-server "$@" -fi - -if [ "$1" = 'redis-server' 
-a "$(id -u)" = '0' ]; then - chown -R redis . - exec sudo -u redis "$@" -fi - -exec "$@" diff --git a/make/photon/redis/docker-healthcheck b/make/photon/redis/docker-healthcheck new file mode 100644 index 000000000..80f5cc480 --- /dev/null +++ b/make/photon/redis/docker-healthcheck @@ -0,0 +1,9 @@ +#!/bin/bash + +set -eo pipefail + +if ping="$(redis-cli -h "127.0.0.1" ping)" && [ "$ping" = 'PONG' ]; then + exit 0 +fi + +exit 1 \ No newline at end of file diff --git a/make/prepare b/make/prepare index 8e6371f96..c628f46a3 100755 --- a/make/prepare +++ b/make/prepare @@ -1,8 +1,8 @@ #!/bin/bash set +e -# If compling source code this dir is harbor's make dir -# If install harbor via pacakge, this dir is harbor's root dir +# If compiling source code this dir is harbor's make dir. +# If installing harbor via package, this dir is harbor's root dir. if [[ -n "$HARBOR_BUNDLE_DIR" ]]; then harbor_prepare_path=$HARBOR_BUNDLE_DIR else @@ -35,7 +35,7 @@ set -e # Copy harbor.yml to input dir if [[ ! "$1" =~ ^\-\- ]] && [ -f "$1" ] then - cp $1 $input_dir/harbor.yml + cp $1 $input_dir/harbor.yml else cp ${harbor_prepare_path}/harbor.yml $input_dir/harbor.yml fi @@ -45,10 +45,12 @@ secret_dir=${data_path}/secret config_dir=$harbor_prepare_path/common/config # Run prepare script -docker run --rm -v $input_dir:/input \ - -v $harbor_prepare_path:/compose_location \ - -v $config_dir:/config \ - -v $secret_dir:/secret \ +docker run --rm -v $input_dir:/input:z \ + -v $data_path:/data:z \ + -v $harbor_prepare_path:/compose_location:z \ + -v $config_dir:/config:z \ + -v $secret_dir:/secret:z \ + -v /:/hostfs:z \ goharbor/prepare:dev $@ echo "Clean up the input dir" diff --git a/src/Gopkg.lock b/src/Gopkg.lock deleted file mode 100644 index 815277ae6..000000000 --- a/src/Gopkg.lock +++ /dev/null @@ -1,802 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - digest = "1:b16fbfbcc20645cb419f78325bb2e85ec729b338e996a228124d68931a6f2a37" - name = "github.com/BurntSushi/toml" - packages = ["."] - pruneopts = "UT" - revision = "b26d9c308763d68093482582cea63d69be07a0f0" - version = "v0.3.0" - -[[projects]] - digest = "1:5d3e23515e7916c152cc665eda0f7eaf6fdf8fdfe7c3dbac97049bcbd649b33f" - name = "github.com/Knetic/govaluate" - packages = ["."] - pruneopts = "UT" - revision = "d216395917cc49052c7c7094cf57f09657ca08a8" - version = "v3.0.0" - -[[projects]] - digest = "1:55388fd080150b9a072912f97b1f5891eb0b50df43401f8b75fb4273d3fec9fc" - name = "github.com/Masterminds/semver" - packages = ["."] - pruneopts = "UT" - revision = "c7af12943936e8c39859482e61f0574c2fd7fc75" - version = "v1.4.2" - -[[projects]] - digest = "1:e8078e5f9d84e87745efb3c0961e78045500cda10d7102fdf839fbac4b49a423" - name = "github.com/Unknwon/goconfig" - packages = ["."] - pruneopts = "UT" - revision = "5f601ca6ef4d5cea8d52be2f8b3a420ee4b574a5" - -[[projects]] - branch = "master" - digest = "1:47ea4fbe2ab4aeb9808502c51e657041c2e49b36b83fc1c1a349135cdf16342f" - name = "github.com/agl/ed25519" - packages = [ - ".", - "edwards25519", - ] - pruneopts = "UT" - revision = "5312a61534124124185d41f09206b9fef1d88403" - -[[projects]] - digest = "1:d2dbd0b0ec5373e89b27d0dd9f59793aa47020a05805b4b75c63aa1b2406781b" - name = "github.com/astaxie/beego" - packages = [ - ".", - "cache", - "cache/redis", - "config", - "context", - "context/param", - "grace", - "logs", - "orm", - "session", - "session/redis", - "toolbox", - "utils", - "validation", - ] - pruneopts = "UT" - revision = "d96289a81bf67728cff7a19b067aaecc65a62ec6" - version = "v1.9.0" - -[[projects]] - digest = "1:4522bd966f53adb3da34201b39df1153534e441c8067d5e674964f05ecca3a71" - name = "github.com/beego/i18n" - packages = ["."] - pruneopts = "UT" - revision = "e87155e8f0c05bf323d0b13470e1b97af0cb5652" - -[[projects]] - digest = "1:2aaf2cc045d0219bba79655e4df795b973168c310574669cb75786684f7287d3" - 
name = "github.com/bmatcuk/doublestar" - packages = ["."] - pruneopts = "UT" - revision = "85a78806aa1b4707d1dbace9be592cf1ece91ab3" - version = "v1.1.1" - -[[projects]] - digest = "1:76ca0dfcbf951d1868c7449453981dba9e1f79034706d1500a5a785000f5f222" - name = "github.com/casbin/casbin" - packages = [ - ".", - "config", - "effect", - "log", - "model", - "persist", - "persist/file-adapter", - "rbac", - "rbac/default-role-manager", - "util", - ] - pruneopts = "UT" - revision = "542e16cac74562eefac970a7d0d1467640d1f1cb" - version = "v1.7.0" - -[[projects]] - digest = "1:f6e5e1bc64c2908167e6aa9a1fe0c084d515132a1c63ad5b6c84036aa06dc0c1" - name = "github.com/coreos/go-oidc" - packages = ["."] - pruneopts = "UT" - revision = "1180514eaf4d9f38d0d19eef639a1d695e066e72" - version = "v2.0.0" - -[[projects]] - digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - digest = "1:ace1aef6acdf2c4647365dc87c14fb8b71ed8bb0b3ae114ffb216614a24da219" - name = "github.com/dghubble/sling" - packages = ["."] - pruneopts = "UT" - revision = "eb56e89ac5088bebb12eef3cb4b293300f43608b" - version = "v1.1.0" - -[[projects]] - digest = "1:d912bf9afc98bbb6539ea99c9ac3e83119853310dd1a3aec1583d76f340ece27" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "UT" - revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" - version = "v3.0.0" - -[[projects]] - digest = "1:d06c54bbda3a04ec18a2fa0577896b3c40f13409639b442379ee0a5a53be8259" - name = "github.com/docker/distribution" - packages = [ - ".", - "context", - "digestset", - "health", - "manifest", - "manifest/manifestlist", - "manifest/schema1", - "manifest/schema2", - "reference", - "registry/api/errcode", - "registry/auth", - "registry/auth/token", - "registry/client/auth/challenge", - "uuid", - ] - pruneopts = "UT" - revision = 
"2461543d988979529609e8cb6fca9ca190dc48da" - version = "v2.7.1" - -[[projects]] - branch = "master" - digest = "1:72ba344e60095ac4fe0eac56f56fe95644421670b808238a1c849ea92721037e" - name = "github.com/docker/go" - packages = ["canonical/json"] - pruneopts = "UT" - revision = "d30aec9fd63c35133f8f79c3412ad91a3b08be06" - -[[projects]] - branch = "master" - digest = "1:4841e14252a2cecf11840bd05230412ad469709bbacfc12467e2ce5ad07f339b" - name = "github.com/docker/libtrust" - packages = ["."] - pruneopts = "UT" - revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20" - -[[projects]] - digest = "1:0594af97b2f4cec6554086eeace6597e20a4b69466eb4ada25adf9f4300dddd2" - name = "github.com/garyburd/redigo" - packages = [ - "internal", - "redis", - ] - pruneopts = "UT" - revision = "a69d19351219b6dd56f274f96d85a7014a2ec34e" - version = "v1.6.0" - -[[projects]] - digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "UT" - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - -[[projects]] - digest = "1:850c49ca338a10fec2cb9e78f793043ed23965489d09e30bcc19fe29719da313" - name = "github.com/go-sql-driver/mysql" - packages = ["."] - pruneopts = "UT" - revision = "a0583e0143b1624142adab07e0e97fe106d99561" - version = "v1.3" - -[[projects]] - digest = "1:9ae31ce33b4bab257668963e844d98765b44160be4ee98cafc44637a213e530d" - name = "github.com/gobwas/glob" - packages = [ - ".", - "compiler", - "match", - "syntax", - "syntax/ast", - "syntax/lexer", - "util/runes", - "util/strings", - ] - pruneopts = "UT" - revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" - version = "v0.2.3" - -[[projects]] - digest = "1:615643b442214e7a9bade98fa7d50ec072fd17bdc5c955daa194b32e73a532a8" - name = "github.com/gocraft/work" - packages = ["."] - pruneopts = "UT" - revision = "1d4117a214abff263b472043871c8666aedb716b" - version = "v0.5.1" - -[[projects]] - digest = 
"1:4d02824a56d268f74a6b6fdd944b20b58a77c3d70e81008b3ee0c4f1a6777340" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "UT" - revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" - version = "v1.2.1" - -[[projects]] - digest = "1:39d9284259004077d3b89109d592fce5f311788745ce94a7ccd4545e536ad3ac" - name = "github.com/golang-migrate/migrate" - packages = [ - ".", - "database", - "database/postgres", - "source", - "source/file", - ] - pruneopts = "UT" - revision = "bcd996f3df28363f43e2d0935484c4559537a3eb" - version = "v3.3.0" - -[[projects]] - branch = "master" - digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "UT" - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - digest = "1:41e5cefde26c58f1560df2d1c32c2fa85e332d7cb4460d2077ae8fd8e0f3d789" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes/any", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "130e6b02ab059e7b717a096f397c5b60111cae74" - -[[projects]] - digest = "1:38ec74012390146c45af1f92d46e5382b50531247929ff3a685d2b2be65155ac" - name = "github.com/gomodule/redigo" - packages = [ - "internal", - "redis", - ] - pruneopts = "UT" - revision = "9c11da706d9b7902c6da69c592f75637793fe121" - version = "v2.0.0" - -[[projects]] - branch = "master" - digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690" - name = "github.com/google/go-querystring" - packages = ["query"] - pruneopts = "UT" - revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" - -[[projects]] - branch = "master" - digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "UT" - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - digest = "1:160eabf7a69910fd74f29c692718bc2437c1c1c7d4c9dea9712357752a70e5df" - name = 
"github.com/gorilla/context" - packages = ["."] - pruneopts = "UT" - revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" - version = "v1.1" - -[[projects]] - digest = "1:185a43b59a1f4e7ad4e7ccafb8a1538193d897a2a75be16dda093ec42ad231cf" - name = "github.com/gorilla/handlers" - packages = ["."] - pruneopts = "UT" - revision = "90663712d74cb411cbef281bc1e08c19d1a76145" - version = "v1.3.0" - -[[projects]] - digest = "1:3c44722a6360b8d8abf6f70f122c69007189be992a150e39571224c54a9bc380" - name = "github.com/gorilla/mux" - packages = ["."] - pruneopts = "UT" - revision = "7f08801859139f86dfafd1c296e2cba9a80d292e" - version = "v1.6.0" - -[[projects]] - digest = "1:f5a2051c55d05548d2d4fd23d244027b59fbd943217df8aa3b5e170ac2fd6e1b" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "UT" - revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29" - version = "v1.1.6" - -[[projects]] - digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" - name = "github.com/konsorten/go-windows-terminal-sequences" - packages = ["."] - pruneopts = "UT" - revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" - version = "v1.0.2" - -[[projects]] - branch = "master" - digest = "1:bd26bbaf1e9f9dfe829a88f87a0849b56f717c31785443a67668f2c752fa8412" - name = "github.com/lib/pq" - packages = [ - ".", - "oid", - ] - pruneopts = "UT" - revision = "b2004221932bd6b13167ef654c81cffac36f7537" - -[[projects]] - digest = "1:5113b1edf6e2f370f9ce6101e7b5a86c3e8decd108067e34b762ae91e42964ee" - name = "github.com/miekg/pkcs11" - packages = ["."] - pruneopts = "UT" - revision = "7283ca79f35edb89bc1b4ecae7f86a3680ce737f" - -[[projects]] - digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" - name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "UT" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" - name = 
"github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "UT" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - digest = "1:159d8a990f45d4891f1f04cb6ad7eb18b307cd02d783f7d37fa7a3b93912b172" - name = "github.com/opencontainers/go-digest" - packages = ["."] - pruneopts = "UT" - revision = "aa2ec055abd10d26d539eb630a92241b781ce4bc" - version = "v1.0.0-rc0" - -[[projects]] - digest = "1:11db38d694c130c800d0aefb502fb02519e514dc53d9804ce51d1ad25ec27db6" - name = "github.com/opencontainers/image-spec" - packages = [ - "specs-go", - "specs-go/v1", - ] - pruneopts = "UT" - revision = "d60099175f88c47cd379c4738d158884749ed235" - version = "v1.0.1" - -[[projects]] - digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:bd9efe4e0b0f768302a1e2f0c22458149278de533e521206e5ddc71848c269a0" - name = "github.com/pquerna/cachecontrol" - packages = [ - ".", - "cacheobject", - ] - pruneopts = "UT" - revision = "1555304b9b35fdd2b425bccf1a5613677705e7d0" - -[[projects]] - digest = "1:3f68283c56d93b885f33c679708079e834815138649e9f59ffbc572c2993e0f8" - name = "github.com/robfig/cron" - packages = ["."] - pruneopts = "UT" - revision = "b024fc5ea0e34bc3f83d9941c8d60b0622bfaca4" - version = "v1" - -[[projects]] - digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426" - name = "github.com/sirupsen/logrus" - packages = ["."] - pruneopts = "UT" - revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f" - version = "v1.4.1" - -[[projects]] - digest = 
"1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" - -[[projects]] - digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "UT" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = "1:288e2ba4192b77ec619875ab54d82e2179ca8978e8baa690dcb4343a4a1f4da7" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require", - "suite", - ] - pruneopts = "UT" - revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" - version = "v1.3.0" - -[[projects]] - digest = "1:a5702d6fd0891671faf050c05451d3ee4cfd70cb958e11556fefaca628ce832e" - name = "github.com/theupdateframework/notary" - packages = [ - ".", - "client", - "client/changelist", - "cryptoservice", - "storage", - "trustmanager", - "trustmanager/yubikey", - "trustpinning", - "tuf", - "tuf/data", - "tuf/signed", - "tuf/utils", - "tuf/validation", - ] - pruneopts = "UT" - revision = "d6e1431feb32348e0650bf7551ac5cffd01d857b" - version = "v0.6.1" - -[[projects]] - digest = "1:ab3259b9f5008a18ff8c1cc34623eccce354f3a9faf5b409983cd6717d64b40b" - name = "golang.org/x/crypto" - packages = [ - "cast5", - "ed25519", - "ed25519/internal/edwards25519", - "openpgp", - "openpgp/armor", - "openpgp/clearsign", - "openpgp/elgamal", - "openpgp/errors", - "openpgp/packet", - "openpgp/s2k", - "pbkdf2", - "ssh/terminal", - ] - pruneopts = "UT" - revision = "5f961cd492ac9d43fc33a8ef646bae79d113fd97" - -[[projects]] - digest = "1:2a465dcd21dc1094bd90bc28adc168d5c12d4d754b49d67b34362d26bd5c21b2" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http2", - "http2/hpack", - "lex/httplex", - ] - pruneopts = "UT" - revision = "075e191f18186a8ff2becaf64478e30f4545cdad" - -[[projects]] - digest = 
"1:3d57c230f6800023b6fec274f38a139337b5fc0d00169a100a538eb3ef5e3da8" - name = "golang.org/x/oauth2" - packages = [ - ".", - "clientcredentials", - "internal", - ] - pruneopts = "UT" - revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44" - -[[projects]] - branch = "master" - digest = "1:f21f21efdd315b95a015ffd7ddca70ca60ff021848618b5a4efd88bb1603335f" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "571f7bbbe08da2a8955aed9d4db316e78630e9a3" - -[[projects]] - branch = "master" - digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "UT" - revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" - -[[projects]] - digest = "1:52133d6859535332391e6193c8878d06347f28881111efa900392802485e9a18" - name = "google.golang.org/appengine" - packages = [ - "internal", - "internal/base", - "internal/datastore", - "internal/log", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "UT" - revision = "24e4144ec923c2374f6b06610c0df16a9222c3d9" - -[[projects]] - digest = "1:79decf236a2000df456fe7478fd23da8af950563c922747b299e1fab7fa7d78f" - name = "gopkg.in/asn1-ber.v1" - packages = ["."] - pruneopts = "UT" - revision = "4e86f4367175e39f69d9358a5f17b4dda270378d" - version = "v1.1" - -[[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "UT" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - digest = "1:79691acfc86fc3204928daf67e44955e8021ec5e10091599d344b0e16de32236" - name = "gopkg.in/ldap.v2" - packages = ["."] - pruneopts = "UT" - revision = "8168ee085ee43257585e50c6441aadf54ecb2c9f" - version = "v2.5.0" - -[[projects]] - digest = "1:c0c30f47f9c16f227ba82f0bdfd14fa968453c30b7677a07903b3b4f34b98d49" - name = "gopkg.in/square/go-jose.v2" - packages = [ - ".", - "cipher", - "json", - ] - pruneopts 
= "UT" - revision = "628223f44a71f715d2881ea69afc795a1e9c01be" - version = "v2.3.0" - -[[projects]] - digest = "1:2a81c6e126d36ad027328cffaa4888fc3be40f09dc48028d1f93705b718130b9" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" - version = "v2.1.1" - -[[projects]] - digest = "1:7727a365529cdf6af394821dd990b046c56b8afac31e15e78fed58cf7bc179ad" - name = "k8s.io/api" - packages = [ - "admissionregistration/v1alpha1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "UT" - revision = "5cb15d34447165a97c76ed5a60e4e99c8a01ecfe" - version = "kubernetes-1.13.4" - -[[projects]] - branch = "master" - digest = "1:d0d43cf61b49d2750351759e1d220134ab7731db608b6716dc4ed792a493027d" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/resource", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/clock", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - 
"pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/reflect", - ] - pruneopts = "UT" - revision = "f534d624797b270e5e46104dc7e2c2d61edbb85d" - -[[projects]] - digest = "1:131682c26796b64f0abb77ac3d85525712706fde0b085aaa7b6d10b4398167cc" - name = "k8s.io/client-go" - packages = [ - "kubernetes/scheme", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth/exec", - "rest", - "rest/watch", - "tools/clientcmd/api", - "tools/metrics", - "transport", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/homedir", - "util/integer", - ] - pruneopts = "UT" - revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" - version = "v8.0.0" - -[[projects]] - digest = "1:1076dbb6a69b965ccfda2a06a04e5038db78eff586f74b5daf4a41444e6f6077" - name = "k8s.io/helm" - packages = [ - "cmd/helm/search", - "pkg/chartutil", - "pkg/getter", - "pkg/helm/environment", - "pkg/helm/helmpath", - "pkg/ignore", - "pkg/plugin", - "pkg/proto/hapi/chart", - "pkg/proto/hapi/version", - "pkg/provenance", - "pkg/repo", - "pkg/sympath", - "pkg/tlsutil", - "pkg/urlutil", - "pkg/version", - ] - pruneopts = "UT" - revision = "20adb27c7c5868466912eebdf6664e7390ebe710" - version = "v2.9.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/Masterminds/semver", - "github.com/astaxie/beego", - "github.com/astaxie/beego/cache", - "github.com/astaxie/beego/cache/redis", - "github.com/astaxie/beego/context", - "github.com/astaxie/beego/orm", - "github.com/astaxie/beego/session", - "github.com/astaxie/beego/session/redis", - "github.com/astaxie/beego/validation", - "github.com/beego/i18n", - "github.com/bmatcuk/doublestar", - "github.com/casbin/casbin", - "github.com/casbin/casbin/model", - "github.com/casbin/casbin/persist", - 
"github.com/casbin/casbin/util", - "github.com/coreos/go-oidc", - "github.com/dghubble/sling", - "github.com/dgrijalva/jwt-go", - "github.com/docker/distribution", - "github.com/docker/distribution/health", - "github.com/docker/distribution/manifest/manifestlist", - "github.com/docker/distribution/manifest/schema1", - "github.com/docker/distribution/manifest/schema2", - "github.com/docker/distribution/reference", - "github.com/docker/distribution/registry/auth/token", - "github.com/docker/distribution/registry/client/auth/challenge", - "github.com/docker/libtrust", - "github.com/garyburd/redigo/redis", - "github.com/ghodss/yaml", - "github.com/go-sql-driver/mysql", - "github.com/gocraft/work", - "github.com/golang-migrate/migrate", - "github.com/golang-migrate/migrate/database/postgres", - "github.com/golang-migrate/migrate/source/file", - "github.com/gomodule/redigo/redis", - "github.com/gorilla/handlers", - "github.com/gorilla/mux", - "github.com/lib/pq", - "github.com/opencontainers/go-digest", - "github.com/pkg/errors", - "github.com/robfig/cron", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/require", - "github.com/stretchr/testify/suite", - "github.com/theupdateframework/notary", - "github.com/theupdateframework/notary/client", - "github.com/theupdateframework/notary/trustpinning", - "github.com/theupdateframework/notary/tuf/data", - "golang.org/x/crypto/pbkdf2", - "golang.org/x/oauth2", - "golang.org/x/oauth2/clientcredentials", - "gopkg.in/ldap.v2", - "gopkg.in/yaml.v2", - "k8s.io/api/authentication/v1beta1", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/rest", - "k8s.io/helm/cmd/helm/search", - "k8s.io/helm/pkg/chartutil", - "k8s.io/helm/pkg/proto/hapi/chart", - "k8s.io/helm/pkg/repo", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git 
a/src/Gopkg.toml b/src/Gopkg.toml deleted file mode 100644 index 67a00c2c2..000000000 --- a/src/Gopkg.toml +++ /dev/null @@ -1,137 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -ignored = ["github.com/goharbor/harbor/tests*"] - -[prune] - go-tests = true - unused-packages = true - -[[constraint]] - name = "github.com/astaxie/beego" - version = "=1.9.0" - -[[constraint]] - name = "github.com/casbin/casbin" - version = "=1.7.0" - -[[constraint]] - name = "github.com/dghubble/sling" - version = "=1.1.0" - -[[constraint]] - name = "github.com/dgrijalva/jwt-go" - version = "=3.0.0" - -[[constraint]] - name = "github.com/docker/distribution" - version = "=2.7.1" - -[[constraint]] - branch = "master" - name = "github.com/docker/libtrust" - -[[constraint]] - name = "github.com/go-sql-driver/mysql" - version = "=1.3.0" - -[[override]] - name = "github.com/mattn/go-sqlite3" - version = "=1.6.0" - -[[constraint]] - name = "github.com/opencontainers/go-digest" - version = "=1.0.0-rc0" - -[[constraint]] - name = "gopkg.in/ldap.v2" - version = "=2.5.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "=1.3.0" - -[[constraint]] - name = "github.com/gorilla/handlers" - version = "=1.3.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "=1.6.0" - -[[override]] - name = "github.com/Sirupsen/logrus" - version = "=1.0.5" - -[[override]] - name = "github.com/gorilla/context" - version = "=1.1" - -[[override]] - name = "github.com/garyburd/redigo" - version 
= "=1.6.0" - -[[constraint]] - name = "github.com/golang-migrate/migrate" - version = "=3.3.0" - -[[constraint]] - name = "k8s.io/helm" - version = "2.9.1" - -[[constraint]] - name = "github.com/ghodss/yaml" - version = "=1.0.0" - -[[constraint]] - name = "github.com/Masterminds/semver" - version = "=1.4.2" - -[[constraint]] - name = "github.com/gocraft/work" - version = "=0.5.1" - -[[constraint]] - name = "github.com/robfig/cron" - version = "=1.0" - -[[constraint]] - name = "github.com/coreos/go-oidc" - version = "=2.0.0" - -[[constraint]] - name = "gopkg.in/yaml.v2" - version = "=2.1.1" - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.13.4" - -[[constraint]] - name = "github.com/bmatcuk/doublestar" - version = "=1.1.1" - -[[constraint]] - name = "github.com/pkg/errors" - version = "=0.8.1" - -[[constraint]] - name = "github.com/docker/notary" - version = "=0.6.1" diff --git a/src/chartserver/client.go b/src/chartserver/client.go index e7e1fb646..1ab153570 100644 --- a/src/chartserver/client.go +++ b/src/chartserver/client.go @@ -1,16 +1,16 @@ package chartserver import ( - "errors" "fmt" - commonhttp "github.com/goharbor/harbor/src/common/http" - hlog "github.com/goharbor/harbor/src/common/utils/log" "io" "io/ioutil" "net/http" "net/url" "strings" "time" + + commonhttp "github.com/goharbor/harbor/src/common/http" + "github.com/pkg/errors" ) const ( @@ -49,11 +49,13 @@ func NewChartClient(credential *Credential) *ChartClient { // Create http client func (cc *ChartClient) GetContent(addr string) ([]byte, error) { response, err := cc.sendRequest(addr, http.MethodGet, nil) if err != nil { + err = errors.Wrap(err, "get content failed") return nil, err } content, err := ioutil.ReadAll(response.Body) if err != nil { + err = errors.Wrap(err, "Read response body error") return nil, err } defer response.Body.Close() @@ -61,6 +63,7 @@ func (cc *ChartClient) GetContent(addr string) ([]byte, error) { if response.StatusCode != http.StatusOK { text, err := 
extractError(content) if err != nil { + err = errors.Wrap(err, "Extract content error failed") return nil, err } return nil, &commonhttp.Error{ @@ -106,7 +109,8 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) ( fullURI, err := url.Parse(addr) if err != nil { - return nil, fmt.Errorf("invalid url: %s", err.Error()) + err = errors.Wrap(err, "Invalid url") + return nil, err } request, err := http.NewRequest(method, addr, body) @@ -121,7 +125,7 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) ( response, err := cc.httpClient.Do(request) if err != nil { - hlog.Errorf("%s '%s' failed with error: %s", method, fullURI.Path, err) + err = errors.Wrap(err, fmt.Sprintf("send request %s %s failed", method, fullURI.Path)) return nil, err } diff --git a/src/chartserver/controller.go b/src/chartserver/controller.go index 6815a974f..499b14391 100644 --- a/src/chartserver/controller.go +++ b/src/chartserver/controller.go @@ -7,6 +7,7 @@ import ( "os" hlog "github.com/goharbor/harbor/src/common/utils/log" + "github.com/justinas/alice" ) const ( @@ -42,7 +43,7 @@ type Controller struct { } // NewController is constructor of the chartserver.Controller -func NewController(backendServer *url.URL) (*Controller, error) { +func NewController(backendServer *url.URL, chains ...*alice.Chain) (*Controller, error) { if backendServer == nil { return nil, errors.New("failed to create chartserver.Controller: backend sever address is required") } @@ -68,7 +69,7 @@ func NewController(backendServer *url.URL) (*Controller, error) { return &Controller{ backendServerAddress: backendServer, // Use customized reverse proxy - trafficProxy: NewProxyEngine(backendServer, cred), + trafficProxy: NewProxyEngine(backendServer, cred, chains...), // Initialize chart operator for use chartOperator: &ChartOperator{}, // Create http client with customized timeouts diff --git a/src/chartserver/handler_manipulation.go 
b/src/chartserver/handler_manipulation.go index 9250f2476..42e714916 100644 --- a/src/chartserver/handler_manipulation.go +++ b/src/chartserver/handler_manipulation.go @@ -2,19 +2,20 @@ package chartserver import ( "encoding/json" - "errors" "fmt" + "net/http" + "net/http/httptest" + "os" "strings" "github.com/ghodss/yaml" + commonhttp "github.com/goharbor/harbor/src/common/http" + "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/replication" rep_event "github.com/goharbor/harbor/src/replication/event" "github.com/goharbor/harbor/src/replication/model" + "github.com/pkg/errors" helm_repo "k8s.io/helm/pkg/repo" - - "os" - - "github.com/goharbor/harbor/src/common/utils/log" ) // ListCharts gets the chart list under the namespace @@ -68,11 +69,21 @@ func (c *Controller) DeleteChartVersion(namespace, chartName, version string) er return errors.New("invalid chart for deleting") } - url := fmt.Sprintf("%s/%s/%s", c.APIPrefix(namespace), chartName, version) + url := fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", namespace, chartName, version) + req, _ := http.NewRequest(http.MethodDelete, url, nil) + w := httptest.NewRecorder() - err := c.apiClient.DeleteContent(url) - if err != nil { - return err + c.trafficProxy.ServeHTTP(w, req) + + if w.Code != http.StatusOK { + text, err := extractError(w.Body.Bytes()) + if err != nil { + return err + } + return &commonhttp.Error{ + Code: w.Code, + Message: text, + } } // send notification to replication handler diff --git a/src/chartserver/reverse_proxy.go b/src/chartserver/reverse_proxy.go index 74716ea6d..c11025c77 100644 --- a/src/chartserver/reverse_proxy.go +++ b/src/chartserver/reverse_proxy.go @@ -17,6 +17,7 @@ import ( hlog "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/replication" rep_event "github.com/goharbor/harbor/src/replication/event" + "github.com/justinas/alice" ) const ( @@ -36,20 +37,29 @@ type ProxyEngine struct { backend *url.URL // Use go 
reverse proxy as engine - engine *httputil.ReverseProxy + engine http.Handler } // NewProxyEngine is constructor of NewProxyEngine -func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine { +func NewProxyEngine(target *url.URL, cred *Credential, chains ...*alice.Chain) *ProxyEngine { + var engine http.Handler + + engine = &httputil.ReverseProxy{ + ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile), + Director: func(req *http.Request) { + director(target, cred, req) + }, + ModifyResponse: modifyResponse, + } + + if len(chains) > 0 { + hlog.Info("New chart server traffic proxy with middlewares") + engine = chains[0].Then(engine) + } + return &ProxyEngine{ backend: target, - engine: &httputil.ReverseProxy{ - ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile), - Director: func(req *http.Request) { - director(target, cred, req) - }, - ModifyResponse: modifyResponse, - }, + engine: engine, } } diff --git a/src/common/api/base.go b/src/common/api/base.go index fba8c3621..928c37e08 100644 --- a/src/common/api/base.go +++ b/src/common/api/base.go @@ -20,12 +20,11 @@ import ( "net/http" "strconv" + "github.com/astaxie/beego" "github.com/astaxie/beego/validation" commonhttp "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/utils/log" - - "errors" - "github.com/astaxie/beego" + "github.com/pkg/errors" ) const ( diff --git a/src/common/config/manager.go b/src/common/config/manager.go index 0df6eaa47..3886f160f 100644 --- a/src/common/config/manager.go +++ b/src/common/config/manager.go @@ -210,12 +210,14 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database { return &models.Database{ Type: c.Get(common.DatabaseType).GetString(), PostGreSQL: &models.PostGreSQL{ - Host: c.Get(common.PostGreSQLHOST).GetString(), - Port: c.Get(common.PostGreSQLPort).GetInt(), - Username: c.Get(common.PostGreSQLUsername).GetString(), - Password: c.Get(common.PostGreSQLPassword).GetString(), - Database: 
c.Get(common.PostGreSQLDatabase).GetString(), - SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(), + Host: c.Get(common.PostGreSQLHOST).GetString(), + Port: c.Get(common.PostGreSQLPort).GetInt(), + Username: c.Get(common.PostGreSQLUsername).GetString(), + Password: c.Get(common.PostGreSQLPassword).GetString(), + Database: c.Get(common.PostGreSQLDatabase).GetString(), + SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(), + MaxIdleConns: c.Get(common.PostGreSQLMaxIdleConns).GetInt(), + MaxOpenConns: c.Get(common.PostGreSQLMaxOpenConns).GetInt(), }, } } diff --git a/src/common/config/metadata/metadatalist.go b/src/common/config/metadata/metadatalist.go index 202f426b7..7106a38c6 100644 --- a/src/common/config/metadata/metadatalist.go +++ b/src/common/config/metadata/metadatalist.go @@ -47,6 +47,7 @@ const ( HTTPAuthGroup = "http_auth" OIDCGroup = "oidc" DatabaseGroup = "database" + QuotaGroup = "quota" // Put all config items do not belong a existing group into basic BasicGroup = "basic" ClairGroup = "clair" @@ -74,6 +75,7 @@ var ( {Name: common.ClairURL, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_URL", DefaultValue: "http://clair:6060", ItemType: &StringType{}, Editable: false}, {Name: common.CoreURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_URL", DefaultValue: "http://core:8080", ItemType: &StringType{}, Editable: false}, + {Name: common.CoreLocalURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_LOCAL_URL", DefaultValue: "http://127.0.0.1:8080", ItemType: &StringType{}, Editable: false}, {Name: common.DatabaseType, Scope: SystemScope, Group: BasicGroup, EnvKey: "DATABASE_TYPE", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false}, {Name: common.EmailFrom, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_FROM", DefaultValue: "admin ", ItemType: &StringType{}, Editable: false}, @@ -91,7 +93,7 @@ var ( {Name: common.LDAPBaseDN, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_BASE_DN", DefaultValue: "", 
ItemType: &NonEmptyStringType{}, Editable: false}, {Name: common.LDAPFilter, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false}, {Name: common.LDAPGroupBaseDN, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_BASE_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false}, - {Name: common.LdapGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false}, + {Name: common.LDAPGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false}, {Name: common.LDAPGroupAttributeName, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_GID", DefaultValue: "", ItemType: &StringType{}, Editable: false}, {Name: common.LDAPGroupSearchFilter, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false}, {Name: common.LDAPGroupSearchScope, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false}, @@ -114,6 +116,8 @@ var ( {Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false}, {Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false}, {Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false}, + {Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false}, + {Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, 
EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false}, {Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false}, {Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false}, @@ -133,7 +137,7 @@ var ( {Name: common.HTTPAuthProxyEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}}, {Name: common.HTTPAuthProxyTokenReviewEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}}, {Name: common.HTTPAuthProxyVerifyCert, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "true", ItemType: &BoolType{}}, - {Name: common.HTTPAuthProxyAlwaysOnboard, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}}, + {Name: common.HTTPAuthProxySkipSearch, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}}, {Name: common.OIDCName, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, {Name: common.OIDCEndpoint, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, @@ -147,5 +151,10 @@ var ( {Name: common.WithNotary, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true}, // the unit of expiration is minute, 43200 minutes = 30 days {Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true}, + {Name: common.NotificationEnable, Scope: UserScope, Group: BasicGroup, EnvKey: "NOTIFICATION_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true}, + + {Name: common.QuotaPerProjectEnable, Scope: UserScope, Group: QuotaGroup, EnvKey: "QUOTA_PER_PROJECT_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, 
Editable: true}, + {Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true}, + {Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true}, } ) diff --git a/src/common/config/metadata/type.go b/src/common/config/metadata/type.go index 6ed790c97..745f30868 100644 --- a/src/common/config/metadata/type.go +++ b/src/common/config/metadata/type.go @@ -18,9 +18,10 @@ package metadata import ( "encoding/json" "fmt" - "github.com/goharbor/harbor/src/common" "strconv" "strings" + + "github.com/goharbor/harbor/src/common" ) // Type - Use this interface to define and encapsulate the behavior of validation and transformation @@ -186,3 +187,21 @@ func (t *MapType) get(str string) (interface{}, error) { err := json.Unmarshal([]byte(str), &result) return result, err } + +// QuotaType ... +type QuotaType struct { + Int64Type +} + +func (t *QuotaType) validate(str string) error { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + + if val <= 0 && val != -1 { + return fmt.Errorf("quota value should be -1 or great than zero") + } + + return nil +} diff --git a/src/common/config/store/driver/db.go b/src/common/config/store/driver/db.go index 18fe703e0..97b6568cb 100644 --- a/src/common/config/store/driver/db.go +++ b/src/common/config/store/driver/db.go @@ -40,7 +40,7 @@ func (d *Database) Load() (map[string]interface{}, error) { itemMetadata, ok := metadata.Instance().GetByName(item.Key) if !ok { - log.Warningf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err) + log.Debugf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err) continue } if itemMetadata.Scope == metadata.SystemScope { diff --git a/src/common/const.go b/src/common/const.go old mode 100644 new mode 100755 index 532e7960f..dbb8dec57 --- 
a/src/common/const.go +++ b/src/common/const.go @@ -53,8 +53,11 @@ const ( PostGreSQLPassword = "postgresql_password" PostGreSQLDatabase = "postgresql_database" PostGreSQLSSLMode = "postgresql_sslmode" + PostGreSQLMaxIdleConns = "postgresql_max_idle_conns" + PostGreSQLMaxOpenConns = "postgresql_max_open_conns" SelfRegistration = "self_registration" CoreURL = "core_url" + CoreLocalURL = "core_local_url" JobServiceURL = "jobservice_url" LDAPURL = "ldap_url" LDAPSearchDN = "ldap_search_dn" @@ -100,7 +103,7 @@ const ( HTTPAuthProxyEndpoint = "http_authproxy_endpoint" HTTPAuthProxyTokenReviewEndpoint = "http_authproxy_tokenreview_endpoint" HTTPAuthProxyVerifyCert = "http_authproxy_verify_cert" - HTTPAuthProxyAlwaysOnboard = "http_authproxy_always_onboard" + HTTPAuthProxySkipSearch = "http_authproxy_skip_search" OIDCName = "oidc_name" OIDCEndpoint = "oidc_endpoint" OIDCCLientID = "oidc_client_id" @@ -120,8 +123,9 @@ const ( NotaryURL = "notary_url" DefaultCoreEndpoint = "http://core:8080" DefaultNotaryEndpoint = "http://notary-server:4443" - LdapGroupType = 1 - LdapGroupAdminDn = "ldap_group_admin_dn" + LDAPGroupType = 1 + HTTPGroupType = 2 + LDAPGroupAdminDn = "ldap_group_admin_dn" LDAPGroupMembershipAttribute = "ldap_group_membership_attribute" DefaultRegistryControllerEndpoint = "http://registryctl:8080" WithChartMuseum = "with_chartmuseum" @@ -141,4 +145,12 @@ const ( OIDCLoginPath = "/c/oidc/login" ChartUploadCtxKey = contextKey("chart_upload_event") + + // Global notification enable configuration + NotificationEnable = "notification_enable" + + // Quota setting items for project + QuotaPerProjectEnable = "quota_per_project_enable" + CountPerProject = "count_per_project" + StoragePerProject = "storage_per_project" ) diff --git a/src/common/dao/artifact.go b/src/common/dao/artifact.go new file mode 100644 index 000000000..34663b5cd --- /dev/null +++ b/src/common/dao/artifact.go @@ -0,0 +1,142 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "strings" + "time" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/models" +) + +// AddArtifact ... +func AddArtifact(af *models.Artifact) (int64, error) { + now := time.Now() + af.CreationTime = now + af.PushTime = now + + id, err := GetOrmer().Insert(af) + if err != nil { + if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { + return 0, ErrDupRows + } + return 0, err + } + return id, nil +} + +// UpdateArtifact ... +func UpdateArtifact(af *models.Artifact) error { + _, err := GetOrmer().Update(af) + return err +} + +// UpdateArtifactDigest ... +func UpdateArtifactDigest(af *models.Artifact) error { + _, err := GetOrmer().Update(af, "digest") + return err +} + +// UpdateArtifactPullTime updates the pull time of the artifact. +func UpdateArtifactPullTime(af *models.Artifact) error { + _, err := GetOrmer().Update(af, "pull_time") + return err +} + +// DeleteArtifact ... +func DeleteArtifact(id int64) error { + + _, err := GetOrmer().QueryTable(&models.Artifact{}).Filter("ID", id).Delete() + return err +} + +// DeleteArtifactByDigest ... +func DeleteArtifactByDigest(projectID int64, repo, digest string) error { + _, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and digest = ? `, + projectID, repo, digest).Exec() + if err != nil { + return err + } + return nil +} + +// DeleteArtifactByTag ... 
+func DeleteArtifactByTag(projectID int64, repo, tag string) error { + _, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and tag = ? `, + projectID, repo, tag).Exec() + if err != nil { + return err + } + return nil +} + +// ListArtifacts list artifacts according to the query conditions +func ListArtifacts(query *models.ArtifactQuery) ([]*models.Artifact, error) { + qs := getArtifactQuerySetter(query) + if query.Size > 0 { + qs = qs.Limit(query.Size) + if query.Page > 0 { + qs = qs.Offset((query.Page - 1) * query.Size) + } + } + afs := []*models.Artifact{} + _, err := qs.All(&afs) + return afs, err +} + +// GetArtifact by repository and tag +func GetArtifact(repo, tag string) (*models.Artifact, error) { + artifact := &models.Artifact{} + err := GetOrmer().QueryTable(&models.Artifact{}). + Filter("Repo", repo). + Filter("Tag", tag).One(artifact) + if err != nil { + if err == orm.ErrNoRows { + return nil, nil + } + return nil, err + } + return artifact, nil +} + +// GetTotalOfArtifacts returns total of artifacts +func GetTotalOfArtifacts(query ...*models.ArtifactQuery) (int64, error) { + var qs orm.QuerySeter + if len(query) > 0 { + qs = getArtifactQuerySetter(query[0]) + } else { + qs = GetOrmer().QueryTable(&models.Artifact{}) + } + + return qs.Count() +} + +func getArtifactQuerySetter(query *models.ArtifactQuery) orm.QuerySeter { + qs := GetOrmer().QueryTable(&models.Artifact{}) + if query.PID != 0 { + qs = qs.Filter("PID", query.PID) + } + if len(query.Repo) > 0 { + qs = qs.Filter("Repo", query.Repo) + } + if len(query.Tag) > 0 { + qs = qs.Filter("Tag", query.Tag) + } + if len(query.Digest) > 0 { + qs = qs.Filter("Digest", query.Digest) + } + return qs +} diff --git a/src/common/dao/artifact_blob.go b/src/common/dao/artifact_blob.go new file mode 100644 index 000000000..f1bcabb56 --- /dev/null +++ b/src/common/dao/artifact_blob.go @@ -0,0 +1,110 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "fmt" + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/pkg/errors" + "strconv" + "strings" + "time" +) + +// AddArtifactNBlob ... +func AddArtifactNBlob(afnb *models.ArtifactAndBlob) (int64, error) { + now := time.Now() + afnb.CreationTime = now + id, err := GetOrmer().Insert(afnb) + if err != nil { + if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { + return 0, ErrDupRows + } + return 0, err + } + return id, nil +} + +// AddArtifactNBlobs ... +func AddArtifactNBlobs(afnbs []*models.ArtifactAndBlob) error { + o := orm.NewOrm() + err := o.Begin() + if err != nil { + return err + } + + var errInsertMultiple error + total := len(afnbs) + successNums, err := o.InsertMulti(total, afnbs) + if err != nil { + errInsertMultiple = err + if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { + errInsertMultiple = errors.Wrap(errInsertMultiple, ErrDupRows.Error()) + } + err := o.Rollback() + if err != nil { + log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err) + errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error()) + } + return errInsertMultiple + } + + // part of them cannot be inserted successfully. 
+ if successNums != int64(total) { + errInsertMultiple = errors.New("Not all of artifact and blobs are inserted successfully") + err := o.Rollback() + if err != nil { + log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err) + errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error()) + } + return errInsertMultiple + } + + err = o.Commit() + if err != nil { + log.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err) + return fmt.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err) + } + + return nil +} + +// DeleteArtifactAndBlobByDigest ... +func DeleteArtifactAndBlobByDigest(digest string) error { + _, err := GetOrmer().Raw(`delete from artifact_blob where digest_af = ? `, digest).Exec() + if err != nil { + return err + } + return nil +} + +// CountSizeOfArtifact ... +func CountSizeOfArtifact(digest string) (int64, error) { + var res []orm.Params + num, err := GetOrmer().Raw(`SELECT sum(bb.size) FROM artifact_blob afnb LEFT JOIN blob bb ON afnb.digest_blob = bb.digest WHERE afnb.digest_af = ? `, digest).Values(&res) + if err != nil { + return -1, err + } + if num > 0 { + size, err := strconv.ParseInt(res[0]["sum"].(string), 0, 64) + if err != nil { + return -1, err + } + return size, nil + } + return -1, err +} diff --git a/src/common/dao/artifact_blob_test.go b/src/common/dao/artifact_blob_test.go new file mode 100644 index 000000000..3da44748b --- /dev/null +++ b/src/common/dao/artifact_blob_test.go @@ -0,0 +1,131 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/models" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAddArtifactNBlob(t *testing.T) { + afnb := &models.ArtifactAndBlob{ + DigestAF: "vvvv", + DigestBlob: "aaaa", + } + + // add + id, err := AddArtifactNBlob(afnb) + require.Nil(t, err) + afnb.ID = id + assert.Equal(t, id, int64(1)) +} + +func TestAddArtifactNBlobs(t *testing.T) { + afnb1 := &models.ArtifactAndBlob{ + DigestAF: "zzzz", + DigestBlob: "zzza", + } + afnb2 := &models.ArtifactAndBlob{ + DigestAF: "zzzz", + DigestBlob: "zzzb", + } + afnb3 := &models.ArtifactAndBlob{ + DigestAF: "zzzz", + DigestBlob: "zzzc", + } + + var afnbs []*models.ArtifactAndBlob + afnbs = append(afnbs, afnb1) + afnbs = append(afnbs, afnb2) + afnbs = append(afnbs, afnb3) + + // add + err := AddArtifactNBlobs(afnbs) + require.Nil(t, err) +} + +func TestDeleteArtifactAndBlobByDigest(t *testing.T) { + afnb := &models.ArtifactAndBlob{ + DigestAF: "vvvv", + DigestBlob: "vvva", + } + + // add + _, err := AddArtifactNBlob(afnb) + require.Nil(t, err) + + // delete + err = DeleteArtifactAndBlobByDigest(afnb.DigestAF) + require.Nil(t, err) +} + +func TestCountSizeOfArtifact(t *testing.T) { + + afnb1 := &models.ArtifactAndBlob{ + DigestAF: "xxxx", + DigestBlob: "aaaa", + } + afnb2 := &models.ArtifactAndBlob{ + DigestAF: "xxxx", + DigestBlob: "aaab", + } + afnb3 := &models.ArtifactAndBlob{ + DigestAF: "xxxx", + DigestBlob: "aaac", + } + + var afnbs []*models.ArtifactAndBlob + afnbs = append(afnbs, 
afnb1) + afnbs = append(afnbs, afnb2) + afnbs = append(afnbs, afnb3) + + err := AddArtifactNBlobs(afnbs) + require.Nil(t, err) + + blob1 := &models.Blob{ + Digest: "aaaa", + ContentType: "v2.blob", + Size: 100, + } + + _, err = AddBlob(blob1) + require.Nil(t, err) + + blob2 := &models.Blob{ + Digest: "aaab", + ContentType: "v2.blob", + Size: 200, + } + + _, err = AddBlob(blob2) + require.Nil(t, err) + + blob3 := &models.Blob{ + Digest: "aaac", + ContentType: "v2.blob", + Size: 300, + } + + _, err = AddBlob(blob3) + require.Nil(t, err) + + imageSize, err := CountSizeOfArtifact("xxxx") + require.Nil(t, err) + require.Equal(t, imageSize, int64(600)) +} diff --git a/src/common/dao/artifact_test.go b/src/common/dao/artifact_test.go new file mode 100644 index 000000000..a7889375c --- /dev/null +++ b/src/common/dao/artifact_test.go @@ -0,0 +1,184 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "testing" + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAddArtifact(t *testing.T) { + af := &models.Artifact{ + PID: 1, + Repo: "hello-world", + Tag: "latest", + Digest: "1234abcd", + Kind: "image", + } + + // add + id, err := AddArtifact(af) + require.Nil(t, err) + af.ID = id + assert.Equal(t, id, int64(1)) + +} + +func TestGetArtifact(t *testing.T) { + repo := "hello-world" + tag := "latest" + artifact, err := GetArtifact(repo, tag) + require.Nil(t, err) + require.NotNil(t, artifact) + assert.Equal(t, repo, artifact.Repo) + assert.Equal(t, tag, artifact.Tag) +} + +func TestUpdateArtifactDigest(t *testing.T) { + af := &models.Artifact{ + PID: 1, + Repo: "hello-world", + Tag: "v2.0", + Digest: "4321abcd", + Kind: "image", + } + + // add + _, err := AddArtifact(af) + require.Nil(t, err) + + af.Digest = "update_4321abcd" + require.Nil(t, UpdateArtifactDigest(af)) + assert.Equal(t, af.Digest, "update_4321abcd") +} + +func TestUpdateArtifactPullTime(t *testing.T) { + timeNow := time.Now() + af := &models.Artifact{ + PID: 1, + Repo: "TestUpdateArtifactPullTime", + Tag: "v1.0", + Digest: "4321abcd", + Kind: "image", + PullTime: timeNow, + } + + // add + _, err := AddArtifact(af) + require.Nil(t, err) + + time.Sleep(time.Second * 1) + + af.PullTime = time.Now() + require.Nil(t, UpdateArtifactPullTime(af)) + assert.NotEqual(t, timeNow, af.PullTime) +} + +func TestDeleteArtifact(t *testing.T) { + af := &models.Artifact{ + PID: 1, + Repo: "hello-world", + Tag: "v1.0", + Digest: "1234abcd", + Kind: "image", + } + // add + id, err := AddArtifact(af) + require.Nil(t, err) + + // delete + err = DeleteArtifact(id) + require.Nil(t, err) +} + +func TestDeleteArtifactByDigest(t *testing.T) { + af := &models.Artifact{ + PID: 1, + Repo: "hello-world", + Tag: "v1.1", + Digest: "TestDeleteArtifactByDigest", + Kind: "image", + } + // add + _, err := 
AddArtifact(af) + require.Nil(t, err) + + // delete + err = DeleteArtifactByDigest(af.PID, af.Repo, af.Digest) + require.Nil(t, err) +} + +func TestDeleteArtifactByTag(t *testing.T) { + af := &models.Artifact{ + PID: 1, + Repo: "hello-world", + Tag: "v1.2", + Digest: "TestDeleteArtifactByTag", + Kind: "image", + } + // add + _, err := AddArtifact(af) + require.Nil(t, err) + + // delete + err = DeleteArtifactByTag(1, "hello-world", "v1.2") + require.Nil(t, err) +} + +func TestListArtifacts(t *testing.T) { + af := &models.Artifact{ + PID: 1, + Repo: "hello-world", + Tag: "v3.0", + Digest: "TestListArtifacts", + Kind: "image", + } + // add + _, err := AddArtifact(af) + require.Nil(t, err) + + afs, err := ListArtifacts(&models.ArtifactQuery{ + PID: 1, + Repo: "hello-world", + Tag: "v3.0", + }) + require.Nil(t, err) + assert.Equal(t, 1, len(afs)) +} + +func TestGetTotalOfArtifacts(t *testing.T) { + af := &models.Artifact{ + PID: 2, + Repo: "hello-world", + Tag: "v3.0", + Digest: "TestGetTotalOfArtifacts", + Kind: "image", + } + // add + _, err := AddArtifact(af) + require.Nil(t, err) + + total, err := GetTotalOfArtifacts(&models.ArtifactQuery{ + PID: 2, + Repo: "hello-world", + Tag: "v3.0", + }) + require.Nil(t, err) + assert.Equal(t, int64(1), total) +} diff --git a/src/common/dao/base.go b/src/common/dao/base.go index 3e04867da..43ded29ef 100644 --- a/src/common/dao/base.go +++ b/src/common/dao/base.go @@ -121,12 +121,16 @@ func getDatabase(database *models.Database) (db Database, err error) { switch database.Type { case "", "postgresql": - db = NewPGSQL(database.PostGreSQL.Host, + db = NewPGSQL( + database.PostGreSQL.Host, strconv.Itoa(database.PostGreSQL.Port), database.PostGreSQL.Username, database.PostGreSQL.Password, database.PostGreSQL.Database, - database.PostGreSQL.SSLMode) + database.PostGreSQL.SSLMode, + database.PostGreSQL.MaxIdleConns, + database.PostGreSQL.MaxOpenConns, + ) default: err = fmt.Errorf("invalid database: %s", database.Type) } @@ -139,6 
+143,8 @@ var once sync.Once // GetOrmer :set ormer singleton func GetOrmer() orm.Ormer { once.Do(func() { + // override the default value(1000) to return all records when setting no limit + orm.DefaultRowsLimit = -1 globalOrm = orm.NewOrm() }) return globalOrm @@ -167,11 +173,13 @@ func ClearTable(table string) error { return err } -func paginateForRawSQL(sql string, limit, offset int64) string { +// PaginateForRawSQL ... +func PaginateForRawSQL(sql string, limit, offset int64) string { return fmt.Sprintf("%s limit %d offset %d", sql, limit, offset) } -func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter { +// PaginateForQuerySetter ... +func PaginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter { if size > 0 { qs = qs.Limit(size) if page > 0 { @@ -183,7 +191,34 @@ func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter // Escape .. func Escape(str string) string { + str = strings.Replace(str, `\`, `\\`, -1) str = strings.Replace(str, `%`, `\%`, -1) str = strings.Replace(str, `_`, `\_`, -1) return str } + +// WithTransaction helper for transaction +func WithTransaction(handler func(o orm.Ormer) error) error { + o := orm.NewOrm() + + if err := o.Begin(); err != nil { + log.Errorf("begin transaction failed: %v", err) + return err + } + + if err := handler(o); err != nil { + if e := o.Rollback(); e != nil { + log.Errorf("rollback transaction failed: %v", e) + return e + } + + return err + } + + if err := o.Commit(); err != nil { + log.Errorf("commit transaction failed: %v", err) + return err + } + + return nil +} diff --git a/src/common/dao/blob.go b/src/common/dao/blob.go new file mode 100644 index 000000000..ddcca42e1 --- /dev/null +++ b/src/common/dao/blob.go @@ -0,0 +1,136 @@ +package dao + +import ( + "fmt" + "strings" + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" +) + +// AddBlob ... 
+func AddBlob(blob *models.Blob) (int64, error) { + now := time.Now() + blob.CreationTime = now + id, err := GetOrmer().Insert(blob) + if err != nil { + if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { + return 0, ErrDupRows + } + return 0, err + } + return id, nil +} + +// GetOrCreateBlob returns blob by digest, create it if not exists +func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) { + blob.CreationTime = time.Now() + + created, id, err := GetOrmer().ReadOrCreate(blob, "digest") + if err != nil { + return false, nil, err + } + + blob.ID = id + + return created, blob, nil +} + +// GetBlob ... +func GetBlob(digest string) (*models.Blob, error) { + o := GetOrmer() + qs := o.QueryTable(&models.Blob{}) + qs = qs.Filter("Digest", digest) + b := []*models.Blob{} + _, err := qs.All(&b) + if err != nil { + return nil, fmt.Errorf("failed to get blob for digest %s, error: %v", digest, err) + } + if len(b) == 0 { + log.Infof("No blob found for digest %s, returning empty.", digest) + return &models.Blob{}, nil + } else if len(b) > 1 { + log.Infof("Multiple blob found for digest %s", digest) + return &models.Blob{}, fmt.Errorf("Multiple blob found for digest %s", digest) + } + return b[0], nil +} + +// DeleteBlob ... 
+func DeleteBlob(digest string) error { + o := GetOrmer() + _, err := o.QueryTable("blob").Filter("digest", digest).Delete() + return err +} + +// GetBlobsByArtifact returns blobs of artifact +func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) { + sql := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)` + + var blobs []*models.Blob + if _, err := GetOrmer().Raw(sql, artifactDigest).QueryRows(&blobs); err != nil { + return nil, err + } + + return blobs, nil +} + +// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project +func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) { + blobs, err := GetBlobsByArtifact(digest) + if err != nil { + return nil, err + } + + sql := fmt.Sprintf(` +SELECT + DISTINCT b.digest_blob AS digest +FROM + ( + SELECT + digest + FROM + artifact + WHERE + ( + project_id = ? + AND repo != ? + ) + OR ( + project_id = ? + AND digest != ? 
+ ) + ) AS a + LEFT JOIN artifact_blob b ON a.digest = b.digest_af + AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)-1)) + + params := []interface{}{projectID, repository, projectID, digest} + for _, blob := range blobs { + if blob.Digest != digest { + params = append(params, blob.Digest) + } + } + + var rows []struct { + Digest string + } + + if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil { + return nil, err + } + + shared := map[string]bool{} + for _, row := range rows { + shared[row.Digest] = true + } + + var exclusive []*models.Blob + for _, blob := range blobs { + if blob.Digest != digest && !shared[blob.Digest] { + exclusive = append(exclusive, blob) + } + } + + return exclusive, nil +} diff --git a/src/common/dao/blob_test.go b/src/common/dao/blob_test.go new file mode 100644 index 000000000..26dc5e492 --- /dev/null +++ b/src/common/dao/blob_test.go @@ -0,0 +1,222 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "strings" + "testing" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func TestAddBlob(t *testing.T) { + blob := &models.Blob{ + Digest: "1234abcd", + ContentType: "v2.blob", + Size: 1523, + } + + // add + _, err := AddBlob(blob) + require.Nil(t, err) +} + +func TestGetBlob(t *testing.T) { + blob := &models.Blob{ + Digest: "12345abcde", + ContentType: "v2.blob", + Size: 453, + } + + // add + id, err := AddBlob(blob) + require.Nil(t, err) + blob.ID = id + + blob2, err := GetBlob("12345abcde") + require.Nil(t, err) + assert.Equal(t, blob.Digest, blob2.Digest) + +} + +func TestDeleteBlob(t *testing.T) { + blob := &models.Blob{ + Digest: "123456abcdef", + ContentType: "v2.blob", + Size: 4543, + } + id, err := AddBlob(blob) + require.Nil(t, err) + blob.ID = id + err = DeleteBlob(blob.Digest) + require.Nil(t, err) +} + +func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) { + digest := digest.FromString(strings.Join(layerDigests, ":")).String() + artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag} + if _, err := AddArtifact(artifact); err != nil { + return "", err + } + + var afnbs []*models.ArtifactAndBlob + + blobDigests := append([]string{digest}, layerDigests...) 
+ for _, blobDigest := range blobDigests { + blob := &models.Blob{Digest: blobDigest, Size: 1} + if _, _, err := GetOrCreateBlob(blob); err != nil { + return "", err + } + + afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest}) + } + + total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest}) + if err != nil { + return "", err + } + + if total == 1 { + if err := AddArtifactNBlobs(afnbs); err != nil { + return "", err + } + } + + return digest, nil +} + +func withProject(f func(int64, string)) { + projectName := utils.GenerateRandomString() + + projectID, err := AddProject(models.Project{ + Name: projectName, + OwnerID: 1, + }) + if err != nil { + panic(err) + } + + defer func() { + DeleteProject(projectID) + }() + + f(projectID, projectName) +} + +type GetExclusiveBlobsSuite struct { + suite.Suite +} + +func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string { + digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...) 
+ suite.Nil(err) + + return digest +} + +func (suite *GetExclusiveBlobsSuite) TestInSameRepository() { + withProject(func(projectID int64, projectName string) { + digest1 := digest.FromString(utils.GenerateRandomString()).String() + digest2 := digest.FromString(utils.GenerateRandomString()).String() + digest3 := digest.FromString(utils.GenerateRandomString()).String() + + manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + + manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 2) + } + + manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) { + suite.Len(blobs, 1) + suite.Equal(digest3, blobs[0].Digest) + } + }) +} + +func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + digest1 := digest.FromString(utils.GenerateRandomString()).String() + digest2 := digest.FromString(utils.GenerateRandomString()).String() + digest3 := digest.FromString(utils.GenerateRandomString()).String() + + manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + + manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", 
"latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) { + suite.Len(blobs, 0) + } + + manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 0) + } + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) { + suite.Len(blobs, 1) + suite.Equal(digest3, blobs[0].Digest) + } + }) +} + +func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() { + withProject(func(projectID int64, projectName string) { + digest1 := digest.FromString(utils.GenerateRandomString()).String() + digest2 := digest.FromString(utils.GenerateRandomString()).String() + + manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + + withProject(func(id int64, name string) { + manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2) + if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) { + suite.Len(blobs, 2) + } + if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) { + suite.Len(blobs, 2) + } + }) + + }) +} + +func TestRunGetExclusiveBlobsSuite(t *testing.T) { + suite.Run(t, new(GetExclusiveBlobsSuite)) +} diff --git a/src/common/dao/config.go b/src/common/dao/config.go index 65ec6e195..eea49cb30 100644 --- a/src/common/dao/config.go +++ b/src/common/dao/config.go @@ -54,7 +54,7 @@ func GetConfigEntries() 
([]*models.ConfigEntry, error) { func SaveConfigEntries(entries []models.ConfigEntry) error { o := GetOrmer() for _, entry := range entries { - if entry.Key == common.LdapGroupAdminDn { + if entry.Key == common.LDAPGroupAdminDn { entry.Value = utils.TrimLower(entry.Value) } tempEntry := models.ConfigEntry{} diff --git a/src/common/dao/cve_whitelist.go b/src/common/dao/cve_whitelist.go new file mode 100644 index 000000000..645a1c076 --- /dev/null +++ b/src/common/dao/cve_whitelist.go @@ -0,0 +1,64 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "encoding/json" + "fmt" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" +) + +// CreateCVEWhitelist creates the CVE whitelist +func CreateCVEWhitelist(l models.CVEWhitelist) (int64, error) { + o := GetOrmer() + itemsBytes, _ := json.Marshal(l.Items) + l.ItemsText = string(itemsBytes) + return o.Insert(&l) +} + +// UpdateCVEWhitelist Updates the vulnerability white list to DB +func UpdateCVEWhitelist(l models.CVEWhitelist) (int64, error) { + o := GetOrmer() + itemsBytes, _ := json.Marshal(l.Items) + l.ItemsText = string(itemsBytes) + id, err := o.InsertOrUpdate(&l, "project_id") + return id, err +} + +// GetCVEWhitelist Gets the CVE whitelist of the project based on the project ID in parameter +func GetCVEWhitelist(pid int64) (*models.CVEWhitelist, error) { + o := GetOrmer() + qs := o.QueryTable(&models.CVEWhitelist{}) + qs = qs.Filter("ProjectID", pid) + r := []*models.CVEWhitelist{} + _, err := qs.All(&r) + if err != nil { + return nil, fmt.Errorf("failed to get CVE whitelist for project %d, error: %v", pid, err) + } + if len(r) == 0 { + return nil, nil + } else if len(r) > 1 { + log.Infof("Multiple CVE whitelists found for project %d, length: %d, returning first element.", pid, len(r)) + } + items := []models.CVEWhitelistItem{} + err = json.Unmarshal([]byte(r[0].ItemsText), &items) + if err != nil { + log.Errorf("Failed to decode item list, err: %v, text: %s", err, r[0].ItemsText) + return nil, err + } + r[0].Items = items + return r[0], nil +} diff --git a/src/common/dao/cve_whitelist_test.go b/src/common/dao/cve_whitelist_test.go new file mode 100644 index 000000000..099409de5 --- /dev/null +++ b/src/common/dao/cve_whitelist_test.go @@ -0,0 +1,55 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestUpdateAndGetCVEWhitelist(t *testing.T) { + require.Nil(t, ClearTable("cve_whitelist")) + l2, err := GetCVEWhitelist(5) + assert.Nil(t, err) + assert.Nil(t, l2) + + longList := []models.CVEWhitelistItem{} + for i := 0; i < 50; i++ { + longList = append(longList, models.CVEWhitelistItem{CVEID: "CVE-1999-0067"}) + } + + e := int64(1573254000) + in1 := models.CVEWhitelist{ProjectID: 3, Items: longList, ExpiresAt: &e} + _, err = UpdateCVEWhitelist(in1) + require.Nil(t, err) + // assert.Equal(t, int64(1), n) + out1, err := GetCVEWhitelist(3) + require.Nil(t, err) + assert.Equal(t, int64(3), out1.ProjectID) + assert.Equal(t, longList, out1.Items) + assert.Equal(t, e, *out1.ExpiresAt) + + sysCVEs := []models.CVEWhitelistItem{ + {CVEID: "CVE-2019-10164"}, + {CVEID: "CVE-2017-12345"}, + } + in3 := models.CVEWhitelist{Items: sysCVEs} + _, err = UpdateCVEWhitelist(in3) + require.Nil(t, err) + + require.Nil(t, ClearTable("cve_whitelist")) +} diff --git a/src/common/dao/dao_test.go b/src/common/dao/dao_test.go index 646634226..bc070245a 100644 --- a/src/common/dao/dao_test.go +++ b/src/common/dao/dao_test.go @@ -47,8 +47,8 @@ func cleanByUser(username string) { o := GetOrmer() o.Begin() - err = execUpdate(o, `delete - from project_member + err = execUpdate(o, `delete + from project_member where entity_id = ( select user_id from harbor_user @@ -59,7 +59,7 @@ func cleanByUser(username 
string) { log.Error(err) } - err = execUpdate(o, `delete + err = execUpdate(o, `delete from project_member where project_id = ( select project_id @@ -71,8 +71,8 @@ func cleanByUser(username string) { log.Error(err) } - err = execUpdate(o, `delete - from access_log + err = execUpdate(o, `delete + from access_log where username = ? `, username) if err != nil { @@ -80,7 +80,7 @@ func cleanByUser(username string) { log.Error(err) } - err = execUpdate(o, `delete + err = execUpdate(o, `delete from access_log where project_id = ( select project_id @@ -302,9 +302,6 @@ func TestListUsers(t *testing.T) { if err != nil { t.Errorf("Error occurred in ListUsers: %v", err) } - if len(users) != 1 { - t.Errorf("Expect one user in list, but the acutal length is %d, the list: %+v", len(users), users) - } users2, err := ListUsers(&models.UserQuery{Username: username}) if len(users2) != 1 { t.Errorf("Expect one user in list, but the acutal length is %d, the list: %+v", len(users), users) @@ -1035,3 +1032,53 @@ func TestIsDupRecError(t *testing.T) { assert.True(t, isDupRecErr(fmt.Errorf("pq: duplicate key value violates unique constraint \"properties_k_key\""))) assert.False(t, isDupRecErr(fmt.Errorf("other error"))) } + +func TestWithTransaction(t *testing.T) { + reference := "transaction" + + quota := models.Quota{ + Reference: reference, + ReferenceID: "1", + Hard: "{}", + } + + failed := func(o orm.Ormer) error { + o.Insert("a) + + return fmt.Errorf("failed") + } + + var quotaID int64 + success := func(o orm.Ormer) error { + id, err := o.Insert("a) + if err != nil { + return err + } + + quotaID = id + return nil + } + + assert := assert.New(t) + + if assert.Error(WithTransaction(failed)) { + var quota models.Quota + quota.Reference = reference + quota.ReferenceID = "1" + err := GetOrmer().Read("a, "reference", "reference_id") + assert.Error(err) + assert.False(quota.ID != 0) + } + + if assert.Nil(WithTransaction(success)) { + var quota models.Quota + quota.Reference = reference + 
quota.ReferenceID = "1" + err := GetOrmer().Read("a, "reference", "reference_id") + assert.Nil(err) + assert.True(quota.ID != 0) + assert.Equal(quotaID, quota.ID) + + GetOrmer().Delete(&models.Quota{ID: quotaID}, "id") + } +} diff --git a/src/common/dao/group/usergroup.go b/src/common/dao/group/usergroup.go index e0aa1d226..a6eedfec1 100644 --- a/src/common/dao/group/usergroup.go +++ b/src/common/dao/group/usergroup.go @@ -18,23 +18,35 @@ import ( "strings" "time" - "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/utils" + "fmt" + + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils/log" + "github.com/pkg/errors" ) +// ErrGroupNameDup ... +var ErrGroupNameDup = errors.New("duplicated user group name") + // AddUserGroup - Add User Group func AddUserGroup(userGroup models.UserGroup) (int, error) { + userGroupList, err := QueryUserGroup(models.UserGroup{GroupName: userGroup.GroupName, GroupType: common.HTTPGroupType}) + if err != nil { + return 0, ErrGroupNameDup + } + if len(userGroupList) > 0 { + return 0, ErrGroupNameDup + } o := dao.GetOrmer() - sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?) 
RETURNING id" var id int now := time.Now() - err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id) + err = o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id) if err != nil { return 0, err } @@ -47,10 +59,10 @@ func QueryUserGroup(query models.UserGroup) ([]*models.UserGroup, error) { o := dao.GetOrmer() sql := `select id, group_name, group_type, ldap_group_dn from user_group where 1=1 ` sqlParam := make([]interface{}, 1) - groups := []*models.UserGroup{} + var groups []*models.UserGroup if len(query.GroupName) != 0 { - sql += ` and group_name like ? ` - sqlParam = append(sqlParam, `%`+dao.Escape(query.GroupName)+`%`) + sql += ` and group_name = ? ` + sqlParam = append(sqlParam, query.GroupName) } if query.GroupType != 0 { @@ -86,6 +98,27 @@ func GetUserGroup(id int) (*models.UserGroup, error) { return nil, nil } +// GetGroupIDByGroupName - Return the group ID by given group name. it is possible less group ID than the given group name if some group doesn't exist. +func GetGroupIDByGroupName(groupName []string, groupType int) ([]int, error) { + var retGroupID []int + var conditions []string + if len(groupName) == 0 { + return retGroupID, nil + } + for _, gName := range groupName { + con := "'" + gName + "'" + conditions = append(conditions, con) + } + sql := fmt.Sprintf("select id from user_group where group_name in ( %s ) and group_type = %v", strings.Join(conditions, ","), groupType) + o := dao.GetOrmer() + cnt, err := o.Raw(sql).QueryRows(&retGroupID) + if err != nil { + return retGroupID, err + } + log.Debugf("Found rows %v", cnt) + return retGroupID, nil +} + // DeleteUserGroup ... 
func DeleteUserGroup(id int) error { userGroup := models.UserGroup{ID: id} @@ -111,11 +144,7 @@ func UpdateUserGroupName(id int, groupName string) error { return err } -// OnBoardUserGroup will check if a usergroup exists in usergroup table, if not insert the usergroup and -// put the id in the pointer of usergroup model, if it does exist, return the usergroup's profile. -// This is used for ldap and uaa authentication, such the usergroup can have an ID in Harbor. -// the keyAttribute and combinedKeyAttribute are key columns used to check duplicate usergroup in harbor -func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error { +func onBoardCommonUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error { g.LdapGroupDN = utils.TrimLower(g.LdapGroupDN) o := dao.GetOrmer() @@ -140,19 +169,11 @@ func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttri return nil } -// GetGroupDNQueryCondition get the part of IN ('XXX', 'XXX') condition -func GetGroupDNQueryCondition(userGroupList []*models.UserGroup) string { - result := make([]string, 0) - count := 0 - for _, userGroup := range userGroupList { - if userGroup.GroupType == common.LdapGroupType { - result = append(result, "'"+userGroup.LdapGroupDN+"'") - count++ - } +// OnBoardUserGroup will check if a usergroup exists in usergroup table, if not insert the usergroup and +// put the id in the pointer of usergroup model, if it does exist, return the usergroup's profile. 
+func OnBoardUserGroup(g *models.UserGroup) error { + if g.GroupType == common.LDAPGroupType { + return onBoardCommonUserGroup(g, "LdapGroupDN", "GroupType") } - // No LDAP Group found - if count == 0 { - return "" - } - return strings.Join(result, ",") + return onBoardCommonUserGroup(g, "GroupName", "GroupType") } diff --git a/src/common/dao/group/usergroup_test.go b/src/common/dao/group/usergroup_test.go index 91603e64d..2b7952ef9 100644 --- a/src/common/dao/group/usergroup_test.go +++ b/src/common/dao/group/usergroup_test.go @@ -17,6 +17,7 @@ package group import ( "fmt" "os" + "reflect" "testing" "github.com/goharbor/harbor/src/common" @@ -46,20 +47,30 @@ func TestMain(m *testing.M) { // Extract to test utils initSqls := []string{ "insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')", + "insert into harbor_user (username, email, password, realname) values ('grouptestu09', 'grouptestu09@example.com', '123456', 'grouptestu09')", "insert into project (name, owner_id) values ('member_test_01', 1)", + `insert into project (name, owner_id) values ('group_project2', 1)`, + `insert into project (name, owner_id) values ('group_project_private', 1)`, "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com')", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_http_group', 2, '')", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_myhttp_group', 2, '')", "update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'", "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)", "insert into project_member 
(project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_http_group'), 'g', 4)", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_myhttp_group'), 'g', 4)", } clearSqls := []string{ "delete from project where name='member_test_01'", - "delete from harbor_user where username='member_test_01' or username='pm_sample'", + "delete from project where name='group_project2'", + "delete from project where name='group_project_private'", + "delete from harbor_user where username='member_test_01' or username='pm_sample' or username='grouptestu09'", "delete from user_group", "delete from project_member", } - dao.PrepareTestData(clearSqls, initSqls) + dao.ExecuteBatchSQL(initSqls) + defer dao.ExecuteBatchSQL(clearSqls) result = m.Run() @@ -80,7 +91,7 @@ func TestAddUserGroup(t *testing.T) { want int wantErr bool }{ - {"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LdapGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false}, + {"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LDAPGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false}, {"Insert other user group", args{userGroup: models.UserGroup{GroupName: "other_group", GroupType: 3, LdapGroupDN: "other information"}}, 0, false}, } for _, tt := range tests { @@ -108,8 +119,8 @@ func TestQueryUserGroup(t *testing.T) { wantErr bool }{ {"Query all user group", args{query: models.UserGroup{GroupName: "test_group_01"}}, 1, false}, - 
{"Query all ldap group", args{query: models.UserGroup{GroupType: common.LdapGroupType}}, 2, false}, - {"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LdapGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false}, + {"Query all ldap group", args{query: models.UserGroup{GroupType: common.LDAPGroupType}}, 2, false}, + {"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -126,7 +137,7 @@ func TestQueryUserGroup(t *testing.T) { } func TestGetUserGroup(t *testing.T) { - userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LdapGroupType, LdapGroupDN: "ldap_dn_string"} + userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LDAPGroupType, LdapGroupDN: "ldap_dn_string"} result, err := AddUserGroup(userGroup) if err != nil { t.Errorf("Error occurred when AddUserGroup: %v", err) @@ -175,7 +186,7 @@ func TestUpdateUserGroup(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - fmt.Printf("id=%v", createdUserGroupID) + fmt.Printf("id=%v\n", createdUserGroupID) if err := UpdateUserGroupName(tt.args.id, tt.args.groupName); (err != nil) != tt.wantErr { t.Errorf("UpdateUserGroup() error = %v, wantErr %v", err, tt.wantErr) userGroup, err := GetUserGroup(tt.args.id) @@ -231,65 +242,30 @@ func TestOnBoardUserGroup(t *testing.T) { args{g: &models.UserGroup{ GroupName: "harbor_example", LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com", - GroupType: common.LdapGroupType}}, + GroupType: common.LDAPGroupType}}, false}, {"OnBoardUserGroup second time", args{g: &models.UserGroup{ GroupName: "harbor_example", LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com", - GroupType: common.LdapGroupType}}, + GroupType: 
common.LDAPGroupType}}, + false}, + {"OnBoardUserGroup HTTP user group", + args{g: &models.UserGroup{ + GroupName: "test_myhttp_group", + GroupType: common.HTTPGroupType}}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := OnBoardUserGroup(tt.args.g, "LdapGroupDN", "GroupType"); (err != nil) != tt.wantErr { + if err := OnBoardUserGroup(tt.args.g); (err != nil) != tt.wantErr { t.Errorf("OnBoardUserGroup() error = %v, wantErr %v", err, tt.wantErr) } }) } } -func TestGetGroupDNQueryCondition(t *testing.T) { - userGroupList := []*models.UserGroup{ - { - GroupName: "sample1", - GroupType: 1, - LdapGroupDN: "cn=sample1_users,ou=groups,dc=example,dc=com", - }, - { - GroupName: "sample2", - GroupType: 1, - LdapGroupDN: "cn=sample2_users,ou=groups,dc=example,dc=com", - }, - { - GroupName: "sample3", - GroupType: 0, - LdapGroupDN: "cn=sample3_users,ou=groups,dc=example,dc=com", - }, - } - - groupQueryConditions := GetGroupDNQueryCondition(userGroupList) - expectedConditions := `'cn=sample1_users,ou=groups,dc=example,dc=com','cn=sample2_users,ou=groups,dc=example,dc=com'` - if groupQueryConditions != expectedConditions { - t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", expectedConditions, groupQueryConditions) - } - var userGroupList2 []*models.UserGroup - groupQueryCondition2 := GetGroupDNQueryCondition(userGroupList2) - if len(groupQueryCondition2) > 0 { - t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition2) - } - groupQueryCondition3 := GetGroupDNQueryCondition(nil) - if len(groupQueryCondition3) > 0 { - t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition3) - } -} func TestGetGroupProjects(t *testing.T) { - userID, err := dao.Register(models.User{ - Username: "grouptestu09", - Email: "grouptest09@example.com", - Password: "Harbor123456", - }) - defer dao.DeleteUser(int(userID)) projectID1, err := dao.AddProject(models.Project{ 
Name: "grouptest01", OwnerID: 1, @@ -307,7 +283,7 @@ func TestGetGroupProjects(t *testing.T) { } defer dao.DeleteProject(projectID2) groupID, err := AddUserGroup(models.UserGroup{ - GroupName: "test_group_01", + GroupName: "test_group_03", GroupType: 1, LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com", }) @@ -322,8 +298,7 @@ func TestGetGroupProjects(t *testing.T) { }) defer project.DeleteProjectMemberByID(pmid) type args struct { - groupDNCondition string - query *models.ProjectQueryParam + query *models.ProjectQueryParam } member := &models.MemberQuery{ Name: "grouptestu09", @@ -335,19 +310,17 @@ func TestGetGroupProjects(t *testing.T) { wantErr bool }{ {"Query with group DN", - args{"'cn=harbor_users,ou=groups,dc=example,dc=com'", - &models.ProjectQueryParam{ - Member: member, - }}, + args{&models.ProjectQueryParam{ + Member: member, + }}, 1, false}, {"Query without group DN", - args{"", - &models.ProjectQueryParam{}}, + args{&models.ProjectQueryParam{}}, 1, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := dao.GetGroupProjects(tt.args.groupDNCondition, tt.args.query) + got, err := dao.GetGroupProjects([]int{groupID}, tt.args.query) if (err != nil) != tt.wantErr { t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr) return @@ -377,7 +350,7 @@ func TestGetTotalGroupProjects(t *testing.T) { } defer dao.DeleteProject(projectID2) groupID, err := AddUserGroup(models.UserGroup{ - GroupName: "test_group_01", + GroupName: "test_group_05", GroupType: 1, LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com", }) @@ -392,8 +365,7 @@ func TestGetTotalGroupProjects(t *testing.T) { }) defer project.DeleteProjectMemberByID(pmid) type args struct { - groupDNCondition string - query *models.ProjectQueryParam + query *models.ProjectQueryParam } tests := []struct { name string @@ -401,18 +373,16 @@ func TestGetTotalGroupProjects(t *testing.T) { wantSize int wantErr bool }{ - {"Query with group DN", - 
args{"'cn=harbor_users,ou=groups,dc=example,dc=com'", - &models.ProjectQueryParam{}}, + {"Query with group ID", + args{&models.ProjectQueryParam{}}, 1, false}, - {"Query without group DN", - args{"", - &models.ProjectQueryParam{}}, + {"Query without group ID", + args{&models.ProjectQueryParam{}}, 1, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := dao.GetTotalGroupProjects(tt.args.groupDNCondition, tt.args.query) + got, err := dao.GetTotalGroupProjects([]int{groupID}, tt.args.query) if (err != nil) != tt.wantErr { t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr) return @@ -423,3 +393,90 @@ func TestGetTotalGroupProjects(t *testing.T) { }) } } +func TestGetRolesByLDAPGroup(t *testing.T) { + + userGroupList, err := QueryUserGroup(models.UserGroup{LdapGroupDN: "cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com", GroupType: 1}) + if err != nil || len(userGroupList) < 1 { + t.Errorf("failed to query user group, err %v", err) + } + gl2, err2 := GetGroupIDByGroupName([]string{"test_http_group", "test_myhttp_group"}, common.HTTPGroupType) + if err2 != nil || len(gl2) != 2 { + t.Errorf("failed to query http user group, err %v", err) + } + project, err := dao.GetProjectByName("member_test_01") + if err != nil { + t.Errorf("Error occurred when Get project by name: %v", err) + } + privateProject, err := dao.GetProjectByName("group_project_private") + if err != nil { + t.Errorf("Error occurred when Get project by name: %v", err) + } + + type args struct { + projectID int64 + groupIDs []int + } + tests := []struct { + name string + args args + wantSize int + wantErr bool + }{ + {"Check normal", args{projectID: project.ProjectID, groupIDs: []int{userGroupList[0].ID, gl2[0], gl2[1]}}, 2, false}, + {"Check non exist", args{projectID: privateProject.ProjectID, groupIDs: []int{9999}}, 0, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := dao.GetRolesByGroupID(tt.args.projectID, 
tt.args.groupIDs) + if (err != nil) != tt.wantErr { + t.Errorf("TestGetRolesByLDAPGroup() error = %v, wantErr %v", err, tt.wantErr) + return + } + if len(got) != tt.wantSize { + t.Errorf("TestGetRolesByLDAPGroup() = %v, want %v", len(got), tt.wantSize) + } + }) + } +} + +func TestGetGroupIDByGroupName(t *testing.T) { + groupList, err := QueryUserGroup(models.UserGroup{GroupName: "test_http_group", GroupType: 2}) + if err != nil { + t.Error(err) + } + if len(groupList) < 0 { + t.Error(err) + } + groupList2, err := QueryUserGroup(models.UserGroup{GroupName: "test_myhttp_group", GroupType: 2}) + if err != nil { + t.Error(err) + } + if len(groupList2) < 0 { + t.Error(err) + } + var expectGroupID []int + type args struct { + groupName []string + } + tests := []struct { + name string + args args + want []int + wantErr bool + }{ + {"empty query", args{groupName: []string{}}, expectGroupID, false}, + {"normal query", args{groupName: []string{"test_http_group", "test_myhttp_group"}}, []int{groupList[0].ID, groupList2[0].ID}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GetGroupIDByGroupName(tt.args.groupName, common.HTTPGroupType) + if (err != nil) != tt.wantErr { + t.Errorf("GetHTTPGroupIDByGroupName() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetHTTPGroupIDByGroupName() = %#v, want %#v", got, tt.want) + } + }) + } +} diff --git a/src/common/dao/notification/notification_job.go b/src/common/dao/notification/notification_job.go new file mode 100755 index 000000000..1bd8c5039 --- /dev/null +++ b/src/common/dao/notification/notification_job.go @@ -0,0 +1,122 @@ +package notification + +import ( + "fmt" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/pkg/errors" +) + +// UpdateNotificationJob update notification job 
+func UpdateNotificationJob(job *models.NotificationJob, props ...string) (int64, error) { + if job == nil { + return 0, errors.New("nil job") + } + + if job.ID == 0 { + return 0, fmt.Errorf("notification job ID is empty") + } + + o := dao.GetOrmer() + return o.Update(job, props...) +} + +// AddNotificationJob insert new notification job to DB +func AddNotificationJob(job *models.NotificationJob) (int64, error) { + if job == nil { + return 0, errors.New("nil job") + } + o := dao.GetOrmer() + if len(job.Status) == 0 { + job.Status = models.JobPending + } + return o.Insert(job) +} + +// GetNotificationJob ... +func GetNotificationJob(id int64) (*models.NotificationJob, error) { + o := dao.GetOrmer() + j := &models.NotificationJob{ + ID: id, + } + err := o.Read(j) + if err == orm.ErrNoRows { + return nil, nil + } + return j, nil +} + +// GetTotalCountOfNotificationJobs ... +func GetTotalCountOfNotificationJobs(query ...*models.NotificationJobQuery) (int64, error) { + qs := notificationJobQueryConditions(query...) + return qs.Count() +} + +// GetNotificationJobs ... +func GetNotificationJobs(query ...*models.NotificationJobQuery) ([]*models.NotificationJob, error) { + var jobs []*models.NotificationJob + + qs := notificationJobQueryConditions(query...) + if len(query) > 0 && query[0] != nil { + qs = dao.PaginateForQuerySetter(qs, query[0].Page, query[0].Size) + } + + qs = qs.OrderBy("-UpdateTime") + + _, err := qs.All(&jobs) + return jobs, err +} + +// GetLastTriggerJobsGroupByEventType get notification jobs info of policy, including event type and last trigger time +func GetLastTriggerJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) { + o := dao.GetOrmer() + // get jobs last triggered(created) group by event_type. 
postgres group by usage reference: + // https://stackoverflow.com/questions/13325583/postgresql-max-and-group-by + sql := `select distinct on (event_type) event_type, id, creation_time, status, notify_type, job_uuid, update_time, + creation_time, job_detail from notification_job where policy_id = ? + order by event_type, id desc, creation_time, status, notify_type, job_uuid, update_time, creation_time, job_detail` + + jobs := []*models.NotificationJob{} + _, err := o.Raw(sql, policyID).QueryRows(&jobs) + if err != nil { + log.Errorf("query last trigger info group by event type failed: %v", err) + return nil, err + } + + return jobs, nil +} + +// DeleteNotificationJob ... +func DeleteNotificationJob(id int64) error { + o := dao.GetOrmer() + _, err := o.Delete(&models.NotificationJob{ID: id}) + return err +} + +// DeleteAllNotificationJobsByPolicyID ... +func DeleteAllNotificationJobsByPolicyID(policyID int64) (int64, error) { + o := dao.GetOrmer() + return o.Delete(&models.NotificationJob{PolicyID: policyID}, "policy_id") +} + +func notificationJobQueryConditions(query ...*models.NotificationJobQuery) orm.QuerySeter { + qs := dao.GetOrmer().QueryTable(&models.NotificationJob{}) + if len(query) == 0 || query[0] == nil { + return qs + } + + q := query[0] + if q.PolicyID != 0 { + qs = qs.Filter("PolicyID", q.PolicyID) + } + if len(q.Statuses) > 0 { + qs = qs.Filter("Status__in", q.Statuses) + } + if len(q.EventTypes) > 0 { + qs = qs.Filter("EventType__in", q.EventTypes) + } + return qs +} diff --git a/src/common/dao/notification/notification_job_test.go b/src/common/dao/notification/notification_job_test.go new file mode 100644 index 000000000..0f7b97750 --- /dev/null +++ b/src/common/dao/notification/notification_job_test.go @@ -0,0 +1,263 @@ +package notification + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testJob1 = &models.NotificationJob{ + 
PolicyID: 1111, + EventType: "pushImage", + NotifyType: "http", + Status: "pending", + JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563536782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}", + UUID: "00000000", + } + testJob2 = &models.NotificationJob{ + PolicyID: 111, + EventType: "pullImage", + NotifyType: "http", + Status: "", + JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563537782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}", + UUID: "00000000", + } + testJob3 = &models.NotificationJob{ + PolicyID: 111, + EventType: "deleteImage", + NotifyType: "http", + Status: "pending", + JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563538782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}", + UUID: "00000000", + } +) + +func TestAddNotificationJob(t *testing.T) { + tests := []struct { + name string + job *models.NotificationJob + want int64 + wantErr bool + }{ + {name: "AddNotificationJob nil", job: nil, wantErr: true}, + {name: "AddNotificationJob 1", job: testJob1, want: 1}, + {name: "AddNotificationJob 2", 
job: testJob2, want: 2}, + {name: "AddNotificationJob 3", job: testJob3, want: 3}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := AddNotificationJob(tt.job) + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGetTotalCountOfNotificationJobs(t *testing.T) { + type args struct { + query *models.NotificationJobQuery + } + tests := []struct { + name string + args args + want int64 + wantErr bool + }{ + { + name: "GetTotalCountOfNotificationJobs 1", + args: args{ + query: &models.NotificationJobQuery{ + PolicyID: 111, + }, + }, + want: 2, + }, + { + name: "GetTotalCountOfNotificationJobs 2", + args: args{}, + want: 3, + }, + { + name: "GetTotalCountOfNotificationJobs 3", + args: args{ + query: &models.NotificationJobQuery{ + Statuses: []string{"pending"}, + }, + }, + want: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GetTotalCountOfNotificationJobs(tt.args.query) + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGetLastTriggerJobsGroupByEventType(t *testing.T) { + type args struct { + policyID int64 + } + tests := []struct { + name string + args args + want []*models.NotificationJob + wantErr bool + }{ + { + name: "GetLastTriggerJobsGroupByEventType", + args: args{ + policyID: 111, + }, + want: []*models.NotificationJob{ + testJob2, + testJob3, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GetLastTriggerJobsGroupByEventType(tt.args.policyID) + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, len(tt.want), len(got)) + }) + } + +} + +func TestUpdateNotificationJob(t *testing.T) { + type args struct { + job *models.NotificationJob + props []string + } + tests := []struct { 
+ name string + args args + want int64 + wantErr bool + }{ + {name: "UpdateNotificationJob Want Error 1", args: args{job: nil}, wantErr: true}, + {name: "UpdateNotificationJob Want Error 2", args: args{job: &models.NotificationJob{ID: 0}}, wantErr: true}, + { + name: "UpdateNotificationJob 1", + args: args{ + job: &models.NotificationJob{ID: 1, UUID: "111111111111111"}, + props: []string{"UUID"}, + }, + }, + { + name: "UpdateNotificationJob 2", + args: args{ + job: &models.NotificationJob{ID: 2, UUID: "222222222222222"}, + props: []string{"UUID"}, + }, + }, + { + name: "UpdateNotificationJob 3", + args: args{ + job: &models.NotificationJob{ID: 3, UUID: "333333333333333"}, + props: []string{"UUID"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := UpdateNotificationJob(tt.args.job, tt.args.props...) + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + + require.Nil(t, err) + gotJob, err := GetNotificationJob(tt.args.job.ID) + + require.Nil(t, err) + assert.Equal(t, tt.args.job.UUID, gotJob.UUID) + }) + } +} + +func TestDeleteNotificationJob(t *testing.T) { + type args struct { + id int64 + } + tests := []struct { + name string + args args + wantErr bool + }{ + {name: "DeleteNotificationJob 1", args: args{id: 1}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := DeleteNotificationJob(tt.args.id) + + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + + require.Nil(t, err) + job, err := GetNotificationJob(tt.args.id) + + require.Nil(t, err) + assert.Nil(t, job) + }) + } +} + +func TestDeleteAllNotificationJobs(t *testing.T) { + type args struct { + policyID int64 + query []*models.NotificationJobQuery + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "DeleteAllNotificationJobs 1", + args: args{ + policyID: 111, + query: []*models.NotificationJobQuery{ + {PolicyID: 111}, + }, + }, + }, + } + for _, tt := range tests { 
+ t.Run(tt.name, func(t *testing.T) { + _, err := DeleteAllNotificationJobsByPolicyID(tt.args.policyID) + + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + + require.Nil(t, err) + jobs, err := GetNotificationJobs(tt.args.query...) + + require.Nil(t, err) + assert.Equal(t, 0, len(jobs)) + }) + } +} diff --git a/src/common/dao/notification/notification_policy.go b/src/common/dao/notification/notification_policy.go new file mode 100755 index 000000000..58bf8a52c --- /dev/null +++ b/src/common/dao/notification/notification_policy.go @@ -0,0 +1,69 @@ +package notification + +import ( + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/pkg/errors" +) + +// GetNotificationPolicy return notification policy by id +func GetNotificationPolicy(id int64) (*models.NotificationPolicy, error) { + policy := new(models.NotificationPolicy) + o := dao.GetOrmer() + err := o.QueryTable(policy).Filter("id", id).One(policy) + if err == orm.ErrNoRows { + return nil, nil + } + return policy, err +} + +// GetNotificationPolicyByName return notification policy by name +func GetNotificationPolicyByName(name string, projectID int64) (*models.NotificationPolicy, error) { + policy := new(models.NotificationPolicy) + o := dao.GetOrmer() + err := o.QueryTable(policy).Filter("name", name).Filter("projectID", projectID).One(policy) + if err == orm.ErrNoRows { + return nil, nil + } + return policy, err +} + +// GetNotificationPolicies returns all notification policy in project +func GetNotificationPolicies(projectID int64) ([]*models.NotificationPolicy, error) { + var policies []*models.NotificationPolicy + qs := dao.GetOrmer().QueryTable(new(models.NotificationPolicy)).Filter("ProjectID", projectID) + + _, err := qs.All(&policies) + if err != nil { + return nil, err + } + return policies, nil + +} + +// AddNotificationPolicy insert new notification policy to DB +func 
AddNotificationPolicy(policy *models.NotificationPolicy) (int64, error) { + if policy == nil { + return 0, errors.New("nil policy") + } + o := dao.GetOrmer() + return o.Insert(policy) +} + +// UpdateNotificationPolicy update t specified notification policy +func UpdateNotificationPolicy(policy *models.NotificationPolicy) error { + if policy == nil { + return errors.New("nil policy") + } + o := dao.GetOrmer() + _, err := o.Update(policy) + return err +} + +// DeleteNotificationPolicy delete notification policy by id +func DeleteNotificationPolicy(id int64) error { + o := dao.GetOrmer() + _, err := o.Delete(&models.NotificationPolicy{ID: id}) + return err +} diff --git a/src/common/dao/notification/notification_policy_test.go b/src/common/dao/notification/notification_policy_test.go new file mode 100644 index 000000000..756a01c7d --- /dev/null +++ b/src/common/dao/notification/notification_policy_test.go @@ -0,0 +1,291 @@ +package notification + +import ( + "testing" + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testPly1 = &models.NotificationPolicy{ + Name: "webhook test policy1", + Description: "webhook test policy1 description", + ProjectID: 111, + TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]", + EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]", + Creator: "no one", + CreationTime: time.Now(), + UpdateTime: time.Now(), + Enabled: true, + } +) + +var ( + testPly2 = &models.NotificationPolicy{ + Name: "webhook test policy2", + Description: "webhook test policy2 description", + ProjectID: 222, + TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]", + EventTypesDB: 
"[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]", + Creator: "no one", + CreationTime: time.Now(), + UpdateTime: time.Now(), + Enabled: true, + } +) + +var ( + testPly3 = &models.NotificationPolicy{ + Name: "webhook test policy3", + Description: "webhook test policy3 description", + ProjectID: 333, + TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]", + EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]", + Creator: "no one", + CreationTime: time.Now(), + UpdateTime: time.Now(), + Enabled: true, + } +) + +func TestAddNotificationPolicy(t *testing.T) { + tests := []struct { + name string + policy *models.NotificationPolicy + want int64 + wantErr bool + }{ + {name: "AddNotificationPolicy nil", policy: nil, wantErr: true}, + {name: "AddNotificationPolicy 1", policy: testPly1, want: 1}, + {name: "AddNotificationPolicy 2", policy: testPly2, want: 2}, + {name: "AddNotificationPolicy 3", policy: testPly3, want: 3}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := AddNotificationPolicy(tt.policy) + + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGetNotificationPolicies(t *testing.T) { + tests := []struct { + name string + projectID int64 + wantPolicies []*models.NotificationPolicy + wantErr bool + }{ + {name: "GetNotificationPolicies nil", projectID: 0, wantPolicies: []*models.NotificationPolicy{}}, + {name: "GetNotificationPolicies 1", projectID: 111, wantPolicies: []*models.NotificationPolicy{testPly1}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPolicies, err := GetNotificationPolicies(tt.projectID) + if tt.wantErr { + 
require.NotNil(t, err, "wantErr: %s", err) + return + } + + require.Nil(t, err) + for i, gotPolicy := range gotPolicies { + assert.Equal(t, tt.wantPolicies[i].Name, gotPolicy.Name) + assert.Equal(t, tt.wantPolicies[i].ID, gotPolicy.ID) + assert.Equal(t, tt.wantPolicies[i].EventTypesDB, gotPolicy.EventTypesDB) + assert.Equal(t, tt.wantPolicies[i].TargetsDB, gotPolicy.TargetsDB) + assert.Equal(t, tt.wantPolicies[i].Creator, gotPolicy.Creator) + assert.Equal(t, tt.wantPolicies[i].Enabled, gotPolicy.Enabled) + assert.Equal(t, tt.wantPolicies[i].Description, gotPolicy.Description) + } + }) + } +} + +func TestGetNotificationPolicy(t *testing.T) { + tests := []struct { + name string + id int64 + wantPolicy *models.NotificationPolicy + wantErr bool + }{ + {name: "GetRepPolicy 1", id: 1, wantPolicy: testPly1}, + {name: "GetRepPolicy 2", id: 2, wantPolicy: testPly2}, + {name: "GetRepPolicy 3", id: 3, wantPolicy: testPly3}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPolicy, err := GetNotificationPolicy(tt.id) + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name) + assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID) + assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB) + assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB) + assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator) + assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled) + assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description) + }) + } +} + +func TestGetNotificationPolicyByName(t *testing.T) { + type args struct { + name string + projectID int64 + } + tests := []struct { + name string + args args + wantPolicy *models.NotificationPolicy + wantErr bool + }{ + {name: "GetNotificationPolicyByName 1", args: args{name: testPly1.Name, projectID: testPly1.ProjectID}, wantPolicy: testPly1}, + {name: "GetNotificationPolicyByName 2", args: args{name: 
testPly2.Name, projectID: testPly2.ProjectID}, wantPolicy: testPly2}, + {name: "GetNotificationPolicyByName 3", args: args{name: testPly3.Name, projectID: testPly3.ProjectID}, wantPolicy: testPly3}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPolicy, err := GetNotificationPolicyByName(tt.args.name, tt.args.projectID) + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name) + assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID) + assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB) + assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB) + assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator) + assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled) + assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description) + }) + } + +} + +func TestUpdateNotificationPolicy(t *testing.T) { + type args struct { + policy *models.NotificationPolicy + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "UpdateNotificationPolicy nil", + args: args{ + policy: nil, + }, + wantErr: true, + }, + + { + name: "UpdateNotificationPolicy 1", + args: args{ + policy: &models.NotificationPolicy{ + ID: 1, + Name: "webhook test policy1 new", + Description: "webhook test policy1 description new", + ProjectID: 111, + TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]", + EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]", + Creator: "no one", + CreationTime: time.Now(), + UpdateTime: time.Now(), + Enabled: true, + }, + }, + }, + { + name: "UpdateNotificationPolicy 2", + args: args{ + policy: &models.NotificationPolicy{ + ID: 2, + Name: "webhook test policy2 new", + Description: "webhook test policy2 description new", + ProjectID: 222, + 
TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]", + EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]", + Creator: "no one", + CreationTime: time.Now(), + UpdateTime: time.Now(), + Enabled: true, + }, + }, + }, + { + name: "UpdateNotificationPolicy 3", + args: args{ + policy: &models.NotificationPolicy{ + ID: 3, + Name: "webhook test policy3 new", + Description: "webhook test policy3 description new", + ProjectID: 333, + TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]", + EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]", + Creator: "no one", + CreationTime: time.Now(), + UpdateTime: time.Now(), + Enabled: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := UpdateNotificationPolicy(tt.args.policy) + + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + + require.Nil(t, err) + gotPolicy, err := GetNotificationPolicy(tt.args.policy.ID) + + require.Nil(t, err) + assert.Equal(t, tt.args.policy.Description, gotPolicy.Description) + assert.Equal(t, tt.args.policy.Name, gotPolicy.Name) + }) + } + +} + +func TestDeleteNotificationPolicy(t *testing.T) { + tests := []struct { + name string + id int64 + wantErr bool + }{ + {name: "DeleteNotificationPolicy 1", id: 1, wantErr: false}, + {name: "DeleteNotificationPolicy 2", id: 2, wantErr: false}, + {name: "DeleteNotificationPolicy 3", id: 3, wantErr: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := DeleteNotificationPolicy(tt.id) + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + policy, err := 
GetNotificationPolicy(tt.id) + require.Nil(t, err) + assert.Nil(t, policy) + }) + } +} diff --git a/src/common/dao/notification/notification_test.go b/src/common/dao/notification/notification_test.go new file mode 100644 index 000000000..2912e75f9 --- /dev/null +++ b/src/common/dao/notification/notification_test.go @@ -0,0 +1,13 @@ +package notification + +import ( + "os" + "testing" + + "github.com/goharbor/harbor/src/common/dao" +) + +func TestMain(m *testing.M) { + dao.PrepareTestForPostgresSQL() + os.Exit(m.Run()) +} diff --git a/src/common/dao/pgsql.go b/src/common/dao/pgsql.go index e1b3da6cb..bf98c6b08 100644 --- a/src/common/dao/pgsql.go +++ b/src/common/dao/pgsql.go @@ -31,12 +31,14 @@ import ( const defaultMigrationPath = "migrations/postgresql/" type pgsql struct { - host string - port string - usr string - pwd string - database string - sslmode string + host string + port string + usr string + pwd string + database string + sslmode string + maxIdleConns int + maxOpenConns int } // Name returns the name of PostgreSQL @@ -51,17 +53,19 @@ func (p *pgsql) String() string { } // NewPGSQL returns an instance of postgres -func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string) Database { +func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database { if len(sslmode) == 0 { sslmode = "disable" } return &pgsql{ - host: host, - port: port, - usr: usr, - pwd: pwd, - database: database, - sslmode: sslmode, + host: host, + port: port, + usr: usr, + pwd: pwd, + database: database, + sslmode: sslmode, + maxIdleConns: maxIdleConns, + maxOpenConns: maxOpenConns, } } @@ -82,7 +86,7 @@ func (p *pgsql) Register(alias ...string) error { info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s", p.host, p.port, p.usr, p.pwd, p.database, p.sslmode) - return orm.RegisterDataBase(an, "postgres", info) + return orm.RegisterDataBase(an, 
"postgres", info, p.maxIdleConns, p.maxOpenConns) } // UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts. diff --git a/src/common/dao/pro_meta.go b/src/common/dao/pro_meta.go index d4a9c4e6f..a6593e2ef 100644 --- a/src/common/dao/pro_meta.go +++ b/src/common/dao/pro_meta.go @@ -44,7 +44,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error { params = append(params, projectID) if len(name) > 0 { - sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name))) + sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name))) params = append(params, name) } @@ -74,7 +74,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad params = append(params, projectID) if len(name) > 0 { - sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name))) + sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name))) params = append(params, name) } @@ -82,7 +82,9 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad return proMetas, err } -func paramPlaceholder(n int) string { +// ParamPlaceholderForIn returns a string that contains placeholders for sql keyword "in" +// e.g. n=3, returns "?,?,?" +func ParamPlaceholderForIn(n int) string { placeholders := []string{} for i := 0; i < n; i++ { placeholders = append(placeholders, "?") diff --git a/src/common/dao/project.go b/src/common/dao/project.go index 423b6b23b..e027ec221 100644 --- a/src/common/dao/project.go +++ b/src/common/dao/project.go @@ -156,19 +156,21 @@ func GetProjects(query *models.ProjectQueryParam) ([]*models.Project, error) { // GetGroupProjects - Get user's all projects, including user is the user member of this project // and the user is in the group which is a group member of this project. 
-func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) ([]*models.Project, error) { +func GetGroupProjects(groupIDs []int, query *models.ProjectQueryParam) ([]*models.Project, error) { sql, params := projectQueryConditions(query) sql = `select distinct p.project_id, p.name, p.owner_id, p.creation_time, p.update_time ` + sql - if len(groupDNCondition) > 0 { + groupIDCondition := JoinNumberConditions(groupIDs) + if len(groupIDs) > 0 { sql = fmt.Sprintf( `%s union select distinct p.project_id, p.name, p.owner_id, p.creation_time, p.update_time from project p left join project_member pm on p.project_id = pm.project_id - left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1 - where ug.ldap_group_dn in ( %s ) order by name`, - sql, groupDNCondition) + left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' + where ug.id in ( %s )`, + sql, groupIDCondition) } + sql = sql + ` order by name` sqlStr, queryParams := CreatePagination(query, sql, params) log.Debugf("query sql:%v", sql) var projects []*models.Project @@ -178,10 +180,11 @@ func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) // GetTotalGroupProjects - Get the total count of projects, including user is the member of this project and the // user is in the group, which is the group member of this project. 
-func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) (int, error) { +func GetTotalGroupProjects(groupIDs []int, query *models.ProjectQueryParam) (int, error) { var sql string sqlCondition, params := projectQueryConditions(query) - if len(groupDNCondition) == 0 { + groupIDCondition := JoinNumberConditions(groupIDs) + if len(groupIDs) == 0 { sql = `select count(1) ` + sqlCondition } else { sql = fmt.Sprintf( @@ -189,9 +192,9 @@ func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryPa from ( select p.project_id %s union select p.project_id from project p left join project_member pm on p.project_id = pm.project_id - left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1 - where ug.ldap_group_dn in ( %s )) t`, - sqlCondition, groupDNCondition) + left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' + where ug.id in ( %s )) t`, + sqlCondition, groupIDCondition) } log.Debugf("query sql:%v", sql) var count int @@ -257,7 +260,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac } if len(query.ProjectIDs) > 0 { sql += fmt.Sprintf(` and p.project_id in ( %s )`, - paramPlaceholder(len(query.ProjectIDs))) + ParamPlaceholderForIn(len(query.ProjectIDs))) params = append(params, query.ProjectIDs) } return sql, params @@ -291,29 +294,24 @@ func DeleteProject(id int64) error { return err } -// GetRolesByLDAPGroup - Get Project roles of the -// specified group DN is a member of current project -func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error) { +// GetRolesByGroupID - Get Project roles of the +// specified group is a member of current project +func GetRolesByGroupID(projectID int64, groupIDs []int) ([]int, error) { var roles []int - if len(groupDNCondition) == 0 { + if len(groupIDs) == 0 { return roles, nil } + groupIDCondition := JoinNumberConditions(groupIDs) o := GetOrmer() - // Because an LDAP user 
can be memberof multiple groups, - // the role is in descent order (1-admin, 2-developer, 3-guest, 4-master), use min to select the max privilege role. sql := fmt.Sprintf( - `select min(pm.role) from project_member pm + `select distinct pm.role from project_member pm left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id - where ug.ldap_group_dn in ( %s ) and pm.project_id = ? `, - groupDNCondition) - log.Debugf("sql:%v", sql) + where ug.id in ( %s ) and pm.project_id = ?`, + groupIDCondition) + log.Debugf("sql for GetRolesByGroupID(project ID: %d, group ids: %v):%v", projectID, groupIDs, sql) if _, err := o.Raw(sql, projectID).QueryRows(&roles); err != nil { - log.Warningf("Error in GetRolesByLDAPGroup, error: %v", err) + log.Warningf("Error in GetRolesByGroupID, error: %v", err) return nil, err } - // If there is no row selected, the min returns an empty row, to avoid return 0 as role - if len(roles) == 1 && roles[0] == 0 { - return []int{}, nil - } return roles, nil } diff --git a/src/common/dao/project/projectmember.go b/src/common/dao/project/projectmember.go index f9a81e706..081b036f0 100644 --- a/src/common/dao/project/projectmember.go +++ b/src/common/dao/project/projectmember.go @@ -30,13 +30,13 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) { } o := dao.GetOrmer() - sql := ` select a.* from (select pm.id as id, pm.project_id as project_id, ug.id as entity_id, ug.group_name as entity_name, ug.creation_time, ug.update_time, r.name as rolename, - r.role_id as role, pm.entity_type as entity_type from user_group ug join project_member pm + sql := ` select a.* from (select pm.id as id, pm.project_id as project_id, ug.id as entity_id, ug.group_name as entity_name, ug.creation_time, ug.update_time, r.name as rolename, + r.role_id as role, pm.entity_type as entity_type from user_group ug join project_member pm on pm.project_id = ? 
and ug.id = pm.entity_id join role r on pm.role = r.role_id where pm.entity_type = 'g' union - select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename, - r.role_id as role, pm.entity_type as entity_type from harbor_user u join project_member pm - on pm.project_id = ? and u.user_id = pm.entity_id + select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename, + r.role_id as role, pm.entity_type as entity_type from harbor_user u join project_member pm + on pm.project_id = ? and u.user_id = pm.entity_id join role r on pm.role = r.role_id where u.deleted = false and pm.entity_type = 'u') as a where a.project_id = ? ` queryParam := make([]interface{}, 1) @@ -70,6 +70,27 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) { return members, err } +// GetTotalOfProjectMembers returns total of project members +func GetTotalOfProjectMembers(projectID int64, roles ...int) (int64, error) { + log.Debugf("Query condition %+v", projectID) + if projectID == 0 { + return 0, fmt.Errorf("failed to get total of project members, project id required %v", projectID) + } + + sql := "SELECT COUNT(1) FROM project_member WHERE project_id = ?" + + queryParam := []interface{}{projectID} + + if len(roles) > 0 { + sql += " AND role = ?" 
+ queryParam = append(queryParam, roles[0]) + } + + var count int64 + err := dao.GetOrmer().Raw(sql, queryParam).QueryRow(&count) + return count, err +} + // AddProjectMember inserts a record to table project_member func AddProjectMember(member models.Member) (int, error) { @@ -120,23 +141,23 @@ func DeleteProjectMemberByID(pmid int) error { // SearchMemberByName search members of the project by entity_name func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, error) { o := dao.GetOrmer() - sql := `select pm.id, pm.project_id, - u.username as entity_name, + sql := `select pm.id, pm.project_id, + u.username as entity_name, r.name as rolename, - pm.role, pm.entity_id, pm.entity_type + pm.role, pm.entity_id, pm.entity_type from project_member pm left join harbor_user u on pm.entity_id = u.user_id and pm.entity_type = 'u' left join role r on pm.role = r.role_id - where u.deleted = false and pm.project_id = ? and u.username like ? + where u.deleted = false and pm.project_id = ? and u.username like ? union - select pm.id, pm.project_id, - ug.group_name as entity_name, + select pm.id, pm.project_id, + ug.group_name as entity_name, r.name as rolename, - pm.role, pm.entity_id, pm.entity_type + pm.role, pm.entity_id, pm.entity_type from project_member pm left join user_group ug on pm.entity_id = ug.id and pm.entity_type = 'g' left join role r on pm.role = r.role_id - where pm.project_id = ? and ug.group_name like ? + where pm.project_id = ? and ug.group_name like ? 
order by entity_name ` queryParam := make([]interface{}, 4) queryParam = append(queryParam, projectID) @@ -148,16 +169,3 @@ func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, e _, err := o.Raw(sql, queryParam).QueryRows(&members) return members, err } - -// GetRolesByGroup -- Query group roles -func GetRolesByGroup(projectID int64, groupDNCondition string) []int { - var roles []int - o := dao.GetOrmer() - sql := `select role from project_member pm - left join user_group ug on pm.project_id = ? - where ug.group_type = 1 and ug.ldap_group_dn in (` + groupDNCondition + `)` - if _, err := o.Raw(sql, projectID).QueryRows(&roles); err != nil { - return roles - } - return roles -} diff --git a/src/common/dao/project/projectmember_test.go b/src/common/dao/project/projectmember_test.go index 66de3b6a8..fadb598b2 100644 --- a/src/common/dao/project/projectmember_test.go +++ b/src/common/dao/project/projectmember_test.go @@ -51,11 +51,18 @@ func TestMain(m *testing.M) { "update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'", "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)", "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)", + + "insert into harbor_user (username, email, password, realname) values ('member_test_02', 'member_test_02@example.com', '123456', 'member_test_02')", + "insert into project (name, owner_id) values ('member_test_02', 1)", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_02', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')", + "update project set owner_id = (select 
user_id from harbor_user where username = 'member_test_02') where name = 'member_test_02'", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select user_id from harbor_user where username = 'member_test_02'), 'u', 1)", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select id from user_group where group_name = 'test_group_02'), 'g', 1)", } clearSqls := []string{ - "delete from project where name='member_test_01'", - "delete from harbor_user where username='member_test_01' or username='pm_sample'", + "delete from project where name='member_test_01' or name='member_test_02'", + "delete from harbor_user where username='member_test_01' or username='member_test_02' or username='pm_sample'", "delete from user_group", "delete from project_member", } @@ -285,6 +292,39 @@ func TestGetProjectMember(t *testing.T) { } } + +func TestGetTotalOfProjectMembers(t *testing.T) { + currentProject, _ := dao.GetProjectByName("member_test_02") + + type args struct { + projectID int64 + roles []int + } + tests := []struct { + name string + args args + want int64 + wantErr bool + }{ + {"Get total of project admin", args{currentProject.ProjectID, []int{common.RoleProjectAdmin}}, 2, false}, + {"Get total of master", args{currentProject.ProjectID, []int{common.RoleMaster}}, 0, false}, + {"Get total of developer", args{currentProject.ProjectID, []int{common.RoleDeveloper}}, 0, false}, + {"Get total of guest", args{currentProject.ProjectID, []int{common.RoleGuest}}, 0, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GetTotalOfProjectMembers(tt.args.projectID, tt.args.roles...) 
+ if (err != nil) != tt.wantErr { + t.Errorf("GetTotalOfProjectMembers() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("GetTotalOfProjectMembers() = %v, want %v", got, tt.want) + } + }) + } +} + func PrepareGroupTest() { initSqls := []string{ `insert into user_group (group_name, group_type, ldap_group_dn) values ('harbor_group_01', 1, 'cn=harbor_user,dc=example,dc=com')`, @@ -305,30 +345,3 @@ func PrepareGroupTest() { } dao.PrepareTestData(clearSqls, initSqls) } -func TestGetRolesByGroup(t *testing.T) { - PrepareGroupTest() - - project, err := dao.GetProjectByName("group_project") - if err != nil { - t.Errorf("Error occurred when GetProjectByName : %v", err) - } - type args struct { - projectID int64 - groupDNCondition string - } - tests := []struct { - name string - args args - want []int - }{ - {"Query group with role", args{project.ProjectID, "'cn=harbor_user,dc=example,dc=com'"}, []int{2}}, - {"Query group no role", args{project.ProjectID, "'cn=another_user,dc=example,dc=com'"}, []int{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetRolesByGroup(tt.args.projectID, tt.args.groupDNCondition); !dao.ArrayEqual(got, tt.want) { - t.Errorf("GetRolesByGroup() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/src/common/dao/project_blob.go b/src/common/dao/project_blob.go new file mode 100644 index 000000000..b6ade9938 --- /dev/null +++ b/src/common/dao/project_blob.go @@ -0,0 +1,122 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "fmt" + "time" + + "github.com/goharbor/harbor/src/common/models" +) + +// AddBlobToProject ... +func AddBlobToProject(blobID, projectID int64) (int64, error) { + pb := &models.ProjectBlob{ + BlobID: blobID, + ProjectID: projectID, + CreationTime: time.Now(), + } + + _, id, err := GetOrmer().ReadOrCreate(pb, "blob_id", "project_id") + return id, err +} + +// AddBlobsToProject ... +func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) { + if len(blobs) == 0 { + return 0, nil + } + + now := time.Now() + + var projectBlobs []*models.ProjectBlob + for _, blob := range blobs { + projectBlobs = append(projectBlobs, &models.ProjectBlob{ + BlobID: blob.ID, + ProjectID: projectID, + CreationTime: now, + }) + } + + return GetOrmer().InsertMulti(len(projectBlobs), projectBlobs) +} + +// RemoveBlobsFromProject ... +func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error { + var blobIDs []interface{} + for _, blob := range blobs { + blobIDs = append(blobIDs, blob.ID) + } + + if len(blobIDs) == 0 { + return nil + } + + sql := fmt.Sprintf(`DELETE FROM project_blob WHERE blob_id IN (%s)`, ParamPlaceholderForIn(len(blobIDs))) + + _, err := GetOrmer().Raw(sql, blobIDs).Exec() + return err +} + +// HasBlobInProject ... +func HasBlobInProject(projectID int64, digest string) (bool, error) { + sql := `SELECT COUNT(*) FROM project_blob JOIN blob ON project_blob.blob_id = blob.id AND project_id = ? 
AND digest = ?` + + var count int64 + if err := GetOrmer().Raw(sql, projectID, digest).QueryRow(&count); err != nil { + return false, err + } + + return count > 0, nil +} + +// GetBlobsNotInProject returns blobs not in project +func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blob, error) { + if len(blobDigests) == 0 { + return nil, nil + } + + sql := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) AND digest IN (%s)", + ParamPlaceholderForIn(len(blobDigests))) + + params := []interface{}{projectID} + for _, digest := range blobDigests { + params = append(params, digest) + } + + var blobs []*models.Blob + if _, err := GetOrmer().Raw(sql, params...).QueryRows(&blobs); err != nil { + return nil, err + } + + return blobs, nil +} + +// CountSizeOfProject ... +func CountSizeOfProject(pid int64) (int64, error) { + var blobs []models.Blob + + _, err := GetOrmer().Raw(`SELECT bb.id, bb.digest, bb.content_type, bb.size, bb.creation_time FROM project_blob pb LEFT JOIN blob bb ON pb.blob_id = bb.id WHERE pb.project_id = ? `, pid).QueryRows(&blobs) + if err != nil { + return 0, err + } + + var size int64 + for _, blob := range blobs { + size += blob.Size + } + + return size, err +} diff --git a/src/common/dao/project_blob_test.go b/src/common/dao/project_blob_test.go new file mode 100644 index 000000000..3d3643aee --- /dev/null +++ b/src/common/dao/project_blob_test.go @@ -0,0 +1,68 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "testing" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHasBlobInProject(t *testing.T) { + _, blob, err := GetOrCreateBlob(&models.Blob{ + Digest: digest.FromString(utils.GenerateRandomString()).String(), + Size: 100, + }) + require.Nil(t, err) + + _, err = AddBlobToProject(blob.ID, 1) + require.Nil(t, err) + + has, err := HasBlobInProject(1, blob.Digest) + require.Nil(t, err) + assert.True(t, has) +} + +func TestCountSizeOfProject(t *testing.T) { + id1, err := AddBlob(&models.Blob{ + Digest: "CountSizeOfProject_blob1", + Size: 101, + }) + require.Nil(t, err) + + id2, err := AddBlob(&models.Blob{ + Digest: "CountSizeOfProject_blob2", + Size: 202, + }) + require.Nil(t, err) + + pid1, err := AddProject(models.Project{ + Name: "CountSizeOfProject_project1", + OwnerID: 1, + }) + require.Nil(t, err) + + _, err = AddBlobToProject(id1, pid1) + require.Nil(t, err) + _, err = AddBlobToProject(id2, pid1) + require.Nil(t, err) + + pSize, err := CountSizeOfProject(pid1) + assert.Equal(t, pSize, int64(303)) +} diff --git a/src/common/dao/project_test.go b/src/common/dao/project_test.go index 7358840b9..b35200047 100644 --- a/src/common/dao/project_test.go +++ b/src/common/dao/project_test.go @@ -118,124 +118,6 @@ func Test_projectQueryConditions(t *testing.T) { } } -func TestGetGroupProjects(t *testing.T) { - prepareGroupTest() - query := &models.ProjectQueryParam{Member: &models.MemberQuery{Name: "sample_group"}} - type args struct { - groupDNCondition string - query *models.ProjectQueryParam - } - tests := []struct { - name string - args args - wantSize int - wantErr bool - }{ - {"Verify correct sql", args{groupDNCondition: 
"'cn=harbor_user,dc=example,dc=com'", query: query}, 1, false}, - {"Verify missed sql", args{groupDNCondition: "'cn=another_user,dc=example,dc=com'", query: query}, 0, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := GetGroupProjects(tt.args.groupDNCondition, tt.args.query) - if (err != nil) != tt.wantErr { - t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr) - return - } - if len(got) != tt.wantSize { - t.Errorf("GetGroupProjects() = %v, want %v", got, tt.wantSize) - } - }) - } -} - -func prepareGroupTest() { - initSqls := []string{ - `insert into user_group (group_name, group_type, ldap_group_dn) values ('harbor_group_01', 1, 'cn=harbor_user,dc=example,dc=com')`, - `insert into harbor_user (username, email, password, realname) values ('sample01', 'sample01@example.com', 'harbor12345', 'sample01')`, - `insert into project (name, owner_id) values ('group_project', 1)`, - `insert into project (name, owner_id) values ('group_project_private', 1)`, - `insert into project_metadata (project_id, name, value) values ((select project_id from project where name = 'group_project'), 'public', 'false')`, - `insert into project_metadata (project_id, name, value) values ((select project_id from project where name = 'group_project_private'), 'public', 'false')`, - `insert into project_member (project_id, entity_id, entity_type, role) values ((select project_id from project where name = 'group_project'), (select id from user_group where group_name = 'harbor_group_01'),'g', 2)`, - } - - clearSqls := []string{ - `delete from project_metadata where project_id in (select project_id from project where name in ('group_project', 'group_project_private'))`, - `delete from project where name in ('group_project', 'group_project_private')`, - `delete from project_member where project_id in (select project_id from project where name in ('group_project', 'group_project_private'))`, - `delete from user_group where group_name = 
'harbor_group_01'`, - `delete from harbor_user where username = 'sample01'`, - } - PrepareTestData(clearSqls, initSqls) -} - -func TestGetTotalGroupProjects(t *testing.T) { - prepareGroupTest() - query := &models.ProjectQueryParam{Member: &models.MemberQuery{Name: "sample_group"}} - type args struct { - groupDNCondition string - query *models.ProjectQueryParam - } - tests := []struct { - name string - args args - want int - wantErr bool - }{ - {"Verify correct sql", args{groupDNCondition: "'cn=harbor_user,dc=example,dc=com'", query: query}, 1, false}, - {"Verify missed sql", args{groupDNCondition: "'cn=another_user,dc=example,dc=com'", query: query}, 0, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := GetTotalGroupProjects(tt.args.groupDNCondition, tt.args.query) - if (err != nil) != tt.wantErr { - t.Errorf("GetTotalGroupProjects() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("GetTotalGroupProjects() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetRolesByLDAPGroup(t *testing.T) { - prepareGroupTest() - project, err := GetProjectByName("group_project") - if err != nil { - t.Errorf("Error occurred when Get project by name: %v", err) - } - privateProject, err := GetProjectByName("group_project_private") - if err != nil { - t.Errorf("Error occurred when Get project by name: %v", err) - } - type args struct { - projectID int64 - groupDNCondition string - } - tests := []struct { - name string - args args - wantSize int - wantErr bool - }{ - {"Check normal", args{project.ProjectID, "'cn=harbor_user,dc=example,dc=com'"}, 1, false}, - {"Check non exist", args{privateProject.ProjectID, "'cn=not_harbor_user,dc=example,dc=com'"}, 0, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := GetRolesByLDAPGroup(tt.args.projectID, tt.args.groupDNCondition) - if (err != nil) != tt.wantErr { - t.Errorf("TestGetRolesByLDAPGroup() error = %v, wantErr 
%v", err, tt.wantErr) - return - } - if len(got) != tt.wantSize { - t.Errorf("TestGetRolesByLDAPGroup() = %v, want %v", len(got), tt.wantSize) - } - }) - } -} - func TestProjetExistsByName(t *testing.T) { name := "project_exist_by_name_test" exist := ProjectExistsByName(name) diff --git a/src/common/dao/quota.go b/src/common/dao/quota.go new file mode 100644 index 000000000..c86c53797 --- /dev/null +++ b/src/common/dao/quota.go @@ -0,0 +1,235 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota/driver" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/types" +) + +var ( + quotaOrderMap = map[string]string{ + "creation_time": "b.creation_time asc", + "+creation_time": "b.creation_time asc", + "-creation_time": "b.creation_time desc", + "update_time": "b.update_time asc", + "+update_time": "b.update_time asc", + "-update_time": "b.update_time desc", + } +) + +// AddQuota add quota to the database. +func AddQuota(quota models.Quota) (int64, error) { + now := time.Now() + quota.CreationTime = now + quota.UpdateTime = now + return GetOrmer().Insert("a) +} + +// GetQuota returns quota by id. 
+func GetQuota(id int64) (*models.Quota, error) { + q := models.Quota{ID: id} + err := GetOrmer().Read(&q, "ID") + if err == orm.ErrNoRows { + return nil, nil + } + return &q, err +} + +// UpdateQuota update the quota. +func UpdateQuota(quota models.Quota) error { + quota.UpdateTime = time.Now() + _, err := GetOrmer().Update(&quota) + return err +} + +// Quota quota model for api +type Quota struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + Ref driver.RefObject `json:"ref"` + Reference string `orm:"column(reference)" json:"-"` + ReferenceID string `orm:"column(reference_id)" json:"-"` + Hard string `orm:"column(hard);type(jsonb)" json:"-"` + Used string `orm:"column(used);type(jsonb)" json:"-"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` + UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` +} + +// MarshalJSON ... +func (q *Quota) MarshalJSON() ([]byte, error) { + hard, err := types.NewResourceList(q.Hard) + if err != nil { + return nil, err + } + + used, err := types.NewResourceList(q.Used) + if err != nil { + return nil, err + } + + type Alias Quota + return json.Marshal(&struct { + *Alias + Hard types.ResourceList `json:"hard"` + Used types.ResourceList `json:"used"` + }{ + Alias: (*Alias)(q), + Hard: hard, + Used: used, + }) +} + +// ListQuotas returns quotas by query. +func ListQuotas(query ...*models.QuotaQuery) ([]*Quota, error) { + condition, params := quotaQueryConditions(query...) + + sql := fmt.Sprintf(` +SELECT + a.id, + a.reference, + a.reference_id, + a.hard, + b.used, + b.creation_time, + b.update_time +FROM + quota AS a + JOIN quota_usage AS b ON a.id = b.id %s`, condition) + + orderBy := quotaOrderBy(query...) 
+ if orderBy != "" { + sql += ` order by ` + orderBy + } + + if len(query) > 0 && query[0] != nil { + page, size := query[0].Page, query[0].Size + if size > 0 { + sql += ` limit ?` + params = append(params, size) + if page > 0 { + sql += ` offset ?` + params = append(params, size*(page-1)) + } + } + } + + var quotas []*Quota + if _, err := GetOrmer().Raw(sql, params).QueryRows(&quotas); err != nil { + return nil, err + } + + for _, quota := range quotas { + d, ok := driver.Get(quota.Reference) + if !ok { + continue + } + + ref, err := d.Load(quota.ReferenceID) + if err != nil { + log.Warning(fmt.Sprintf("Load quota reference object (%s, %s) failed: %v", quota.Reference, quota.ReferenceID, err)) + continue + } + + quota.Ref = ref + } + + return quotas, nil +} + +// GetTotalOfQuotas returns total of quotas +func GetTotalOfQuotas(query ...*models.QuotaQuery) (int64, error) { + condition, params := quotaQueryConditions(query...) + sql := fmt.Sprintf("SELECT COUNT(1) FROM quota AS a JOIN quota_usage AS b ON a.id = b.id %s", condition) + + var count int64 + if err := GetOrmer().Raw(sql, params).QueryRow(&count); err != nil { + return 0, err + } + + return count, nil +} + +func quotaQueryConditions(query ...*models.QuotaQuery) (string, []interface{}) { + params := []interface{}{} + sql := "" + if len(query) == 0 || query[0] == nil { + return sql, params + } + + sql += `WHERE 1=1 ` + + q := query[0] + if q.ID != 0 { + sql += `AND a.id = ? ` + params = append(params, q.ID) + } + if q.Reference != "" { + sql += `AND a.reference = ? ` + params = append(params, q.Reference) + } + if q.ReferenceID != "" { + sql += `AND a.reference_id = ? 
` + params = append(params, q.ReferenceID) + } + + if len(q.ReferenceIDs) != 0 { + sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs))) + params = append(params, q.ReferenceIDs) + } + + return sql, params +} + +func castQuantity(field string) string { + // cast -1 to max int64 when order by field + return fmt.Sprintf("CAST( (CASE WHEN (%[1]s) IS NULL THEN '0' WHEN (%[1]s) = '-1' THEN '9223372036854775807' ELSE (%[1]s) END) AS BIGINT )", field) +} + +func quotaOrderBy(query ...*models.QuotaQuery) string { + orderBy := "b.creation_time DESC" + + if len(query) > 0 && query[0] != nil && query[0].Sort != "" { + if val, ok := quotaOrderMap[query[0].Sort]; ok { + orderBy = val + } else { + sort := query[0].Sort + + order := "ASC" + if sort[0] == '-' { + order = "DESC" + sort = sort[1:] + } + + prefix := []string{"hard.", "used."} + for _, p := range prefix { + if strings.HasPrefix(sort, p) { + field := fmt.Sprintf("%s->>'%s'", strings.TrimSuffix(p, "."), strings.TrimPrefix(sort, p)) + orderBy = fmt.Sprintf("(%s) %s", castQuantity(field), order) + break + } + } + } + } + + return orderBy +} diff --git a/src/common/dao/quota_test.go b/src/common/dao/quota_test.go new file mode 100644 index 000000000..21daf10b9 --- /dev/null +++ b/src/common/dao/quota_test.go @@ -0,0 +1,143 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "testing" + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/suite" +) + +var ( + quotaReference = "dao" + quotaUserReference = "user" + quotaHard = models.QuotaHard{"storage": 1024} + quotaHardLarger = models.QuotaHard{"storage": 2048} +) + +type QuotaDaoSuite struct { + suite.Suite +} + +func (suite *QuotaDaoSuite) equalHard(quota1 *models.Quota, quota2 *models.Quota) { + hard1, err := quota1.GetHard() + suite.Nil(err, "hard1 invalid") + + hard2, err := quota2.GetHard() + suite.Nil(err, "hard2 invalid") + + suite.Equal(hard1, hard2) +} + +func (suite *QuotaDaoSuite) TearDownTest() { + ClearTable("quota") + ClearTable("quota_usage") +} + +func (suite *QuotaDaoSuite) TestAddQuota() { + _, err1 := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()}) + suite.Nil(err1) + + // Will fail because reference and reference_id should be unique in db + _, err2 := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()}) + suite.Error(err2) + + _, err3 := AddQuota(models.Quota{Reference: quotaUserReference, ReferenceID: "1", Hard: quotaHard.String()}) + suite.Nil(err3) +} + +func (suite *QuotaDaoSuite) TestGetQuota() { + quota1 := models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()} + id, err := AddQuota(quota1) + suite.Nil(err) + + // Get the new added quota + quota2, err := GetQuota(id) + suite.Nil(err) + suite.NotNil(quota2) + + // Get the quota which id is 10000 not found + quota3, err := GetQuota(10000) + suite.Nil(err) + suite.Nil(quota3) +} + +func (suite *QuotaDaoSuite) TestUpdateQuota() { + quota1 := models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()} + id, err := AddQuota(quota1) + suite.Nil(err) + + // Get the new added quota + quota2, err := GetQuota(id) + suite.Nil(err) + suite.equalHard(&quota1, quota2) + + // Update the quota + quota2.SetHard(quotaHardLarger) + 
time.Sleep(time.Millisecond * 10) // Ensure that UpdateTime changed + suite.Nil(UpdateQuota(*quota2)) + + // Get the updated quota + quota3, err := GetQuota(id) + suite.Nil(err) + suite.equalHard(quota2, quota3) + suite.NotEqual(quota2.UpdateTime, quota3.UpdateTime) +} + +func (suite *QuotaDaoSuite) TestListQuotas() { + id1, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()}) + AddQuotaUsage(models.QuotaUsage{ID: id1, Reference: quotaReference, ReferenceID: "1", Used: "{}"}) + + id2, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "2", Hard: quotaHard.String()}) + AddQuotaUsage(models.QuotaUsage{ID: id2, Reference: quotaReference, ReferenceID: "2", Used: "{}"}) + + id3, _ := AddQuota(models.Quota{Reference: quotaUserReference, ReferenceID: "1", Hard: quotaHardLarger.String()}) + AddQuotaUsage(models.QuotaUsage{ID: id3, Reference: quotaUserReference, ReferenceID: "1", Used: "{}"}) + + id4, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "3", Hard: quotaHard.String()}) + AddQuotaUsage(models.QuotaUsage{ID: id4, Reference: quotaReference, ReferenceID: "3", Used: "{}"}) + + // List all the quotas + quotas, err := ListQuotas() + suite.Nil(err) + suite.Equal(4, len(quotas)) + suite.Equal(quotaReference, quotas[0].Reference) + + // List quotas filter by reference + quotas, err = ListQuotas(&models.QuotaQuery{Reference: quotaReference}) + suite.Nil(err) + suite.Equal(3, len(quotas)) + + // List quotas filter by reference ids + quotas, err = ListQuotas(&models.QuotaQuery{Reference: quotaReference, ReferenceIDs: []string{"1", "2"}}) + suite.Nil(err) + suite.Equal(2, len(quotas)) + + // List quotas by pagination + quotas, err = ListQuotas(&models.QuotaQuery{Pagination: models.Pagination{Size: 2}}) + suite.Nil(err) + suite.Equal(2, len(quotas)) + + // List quotas by sorting + quotas, err = ListQuotas(&models.QuotaQuery{Sorting: models.Sorting{Sort: "-hard.storage"}}) + suite.Nil(err) + 
suite.Equal(quotaUserReference, quotas[0].Reference) +} + +func TestRunQuotaDaoSuite(t *testing.T) { + suite.Run(t, new(QuotaDaoSuite)) +} diff --git a/src/common/dao/quota_usage.go b/src/common/dao/quota_usage.go new file mode 100644 index 000000000..d8b55db9b --- /dev/null +++ b/src/common/dao/quota_usage.go @@ -0,0 +1,144 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "fmt" + "strings" + "time" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/models" +) + +var ( + quotaUsageOrderMap = map[string]string{ + "id": "id asc", + "+id": "id asc", + "-id": "id desc", + "creation_time": "creation_time asc", + "+creation_time": "creation_time asc", + "-creation_time": "creation_time desc", + "update_time": "update_time asc", + "+update_time": "update_time asc", + "-update_time": "update_time desc", + } +) + +// AddQuotaUsage add quota usage to the database. +func AddQuotaUsage(quotaUsage models.QuotaUsage) (int64, error) { + now := time.Now() + quotaUsage.CreationTime = now + quotaUsage.UpdateTime = now + return GetOrmer().Insert(&quotaUsage) +} + +// GetQuotaUsage returns quota usage by id. +func GetQuotaUsage(id int64) (*models.QuotaUsage, error) { + q := models.QuotaUsage{ID: id} + err := GetOrmer().Read(&q, "ID") + if err == orm.ErrNoRows { + return nil, nil + } + return &q, err +} + +// UpdateQuotaUsage update the quota usage. 
+func UpdateQuotaUsage(quotaUsage models.QuotaUsage) error { + quotaUsage.UpdateTime = time.Now() + _, err := GetOrmer().Update(&quotaUsage) + return err +} + +// ListQuotaUsages returns quota usages by query. +func ListQuotaUsages(query ...*models.QuotaUsageQuery) ([]*models.QuotaUsage, error) { + condition, params := quotaUsageQueryConditions(query...) + sql := fmt.Sprintf(`select * %s`, condition) + + orderBy := quotaUsageOrderBy(query...) + if orderBy != "" { + sql += ` order by ` + orderBy + } + + if len(query) > 0 && query[0] != nil { + page, size := query[0].Page, query[0].Size + if size > 0 { + sql += ` limit ?` + params = append(params, size) + if page > 0 { + sql += ` offset ?` + params = append(params, size*(page-1)) + } + } + } + + var quotaUsages []*models.QuotaUsage + if _, err := GetOrmer().Raw(sql, params).QueryRows(&quotaUsages); err != nil { + return nil, err + } + + return quotaUsages, nil +} + +func quotaUsageQueryConditions(query ...*models.QuotaUsageQuery) (string, []interface{}) { + params := []interface{}{} + sql := `from quota_usage ` + if len(query) == 0 || query[0] == nil { + return sql, params + } + + sql += `where 1=1 ` + + q := query[0] + if q.Reference != "" { + sql += `and reference = ? ` + params = append(params, q.Reference) + } + if q.ReferenceID != "" { + sql += `and reference_id = ? ` + params = append(params, q.ReferenceID) + } + if len(q.ReferenceIDs) != 0 { + sql += fmt.Sprintf(`and reference_id in (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs))) + params = append(params, q.ReferenceIDs) + } + + return sql, params +} + +func quotaUsageOrderBy(query ...*models.QuotaUsageQuery) string { + orderBy := "" + + if len(query) > 0 && query[0] != nil && query[0].Sort != "" { + if val, ok := quotaUsageOrderMap[query[0].Sort]; ok { + orderBy = val + } else { + sort := query[0].Sort + + order := "asc" + if sort[0] == '-' { + order = "desc" + sort = sort[1:] + } + + prefix := "used." 
+ if strings.HasPrefix(sort, prefix) { + orderBy = fmt.Sprintf("used->>'%s' %s", strings.TrimPrefix(sort, prefix), order) + } + } + } + + return orderBy +} diff --git a/src/common/dao/quota_usage_test.go b/src/common/dao/quota_usage_test.go new file mode 100644 index 000000000..40ff14124 --- /dev/null +++ b/src/common/dao/quota_usage_test.go @@ -0,0 +1,135 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "testing" + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/suite" +) + +var ( + quotaUsageReference = "project" + quotaUsageUserReference = "user" + quotaUsageUsed = models.QuotaUsed{"storage": 1024} + quotaUsageUsedLarger = models.QuotaUsed{"storage": 2048} +) + +type QuotaUsageDaoSuite struct { + suite.Suite +} + +func (suite *QuotaUsageDaoSuite) equalUsed(usage1 *models.QuotaUsage, usage2 *models.QuotaUsage) { + used1, err := usage1.GetUsed() + suite.Nil(err, "used1 invalid") + + used2, err := usage2.GetUsed() + suite.Nil(err, "used2 invalid") + + suite.Equal(used1, used2) +} + +func (suite *QuotaUsageDaoSuite) TearDownTest() { + ClearTable("quota_usage") +} + +func (suite *QuotaUsageDaoSuite) TestAddQuotaUsage() { + _, err1 := AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()}) + suite.Nil(err1) + + // Will fail because reference and reference_id should be unique in db + _, err2 := 
AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()}) + suite.Error(err2) + + _, err3 := AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageUserReference, ReferenceID: "1", Used: quotaUsageUsed.String()}) + suite.Nil(err3) +} + +func (suite *QuotaUsageDaoSuite) TestGetQuotaUsage() { + quotaUsage1 := models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()} + id, err := AddQuotaUsage(quotaUsage1) + suite.Nil(err) + + // Get the new added quotaUsage + quotaUsage2, err := GetQuotaUsage(id) + suite.Nil(err) + suite.NotNil(quotaUsage2) + + // Get the quotaUsage which id is 10000 not found + quotaUsage3, err := GetQuotaUsage(10000) + suite.Nil(err) + suite.Nil(quotaUsage3) +} + +func (suite *QuotaUsageDaoSuite) TestUpdateQuotaUsage() { + quotaUsage1 := models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()} + id, err := AddQuotaUsage(quotaUsage1) + suite.Nil(err) + + // Get the new added quotaUsage + quotaUsage2, err := GetQuotaUsage(id) + suite.Nil(err) + suite.equalUsed(&quotaUsage1, quotaUsage2) + + // Update the quotaUsage + quotaUsage2.SetUsed(quotaUsageUsedLarger) + time.Sleep(time.Millisecond * 10) // Ensure that UpdateTime changed + suite.Nil(UpdateQuotaUsage(*quotaUsage2)) + + // Get the updated quotaUsage + quotaUsage3, err := GetQuotaUsage(id) + suite.Nil(err) + suite.equalUsed(quotaUsage2, quotaUsage3) + suite.NotEqual(quotaUsage2.UpdateTime, quotaUsage3.UpdateTime) +} + +func (suite *QuotaUsageDaoSuite) TestListQuotaUsages() { + AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()}) + AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "2", Used: quotaUsageUsed.String()}) + AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "3", Used: quotaUsageUsed.String()}) + AddQuotaUsage(models.QuotaUsage{Reference: 
quotaUsageUserReference, ReferenceID: "1", Used: quotaUsageUsedLarger.String()}) + + // List all the quotaUsages + quotaUsages, err := ListQuotaUsages() + suite.Nil(err) + suite.Equal(4, len(quotaUsages)) + suite.Equal(quotaUsageReference, quotaUsages[0].Reference) + + // List quotaUsages filter by reference + quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Reference: quotaUsageReference}) + suite.Nil(err) + suite.Equal(3, len(quotaUsages)) + + // List quotaUsages filter by reference ids + quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Reference: quotaUsageReference, ReferenceIDs: []string{"1", "2"}}) + suite.Nil(err) + suite.Equal(2, len(quotaUsages)) + + // List quotaUsages by pagination + quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Pagination: models.Pagination{Size: 2}}) + suite.Nil(err) + suite.Equal(2, len(quotaUsages)) + + // List quotaUsages by sorting + quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Sorting: models.Sorting{Sort: "-used.storage"}}) + suite.Nil(err) + suite.Equal(quotaUsageUserReference, quotaUsages[0].Reference) +} + +func TestRunQuotaUsageDaoSuite(t *testing.T) { + suite.Run(t, new(QuotaUsageDaoSuite)) +} diff --git a/src/common/dao/repository.go b/src/common/dao/repository.go index c05a46899..abb859525 100644 --- a/src/common/dao/repository.go +++ b/src/common/dao/repository.go @@ -178,7 +178,7 @@ func repositoryQueryConditions(query ...*models.RepositoryQuery) (string, []inte if len(q.ProjectIDs) > 0 { sql += fmt.Sprintf(`and r.project_id in ( %s ) `, - paramPlaceholder(len(q.ProjectIDs))) + ParamPlaceholderForIn(len(q.ProjectIDs))) params = append(params, q.ProjectIDs) } diff --git a/src/common/dao/scan_job.go b/src/common/dao/scan_job.go index 6aa151bc7..fe4aa6ab9 100644 --- a/src/common/dao/scan_job.go +++ b/src/common/dao/scan_job.go @@ -15,12 +15,11 @@ package dao import ( + "encoding/json" + "fmt" "github.com/astaxie/beego/orm" "github.com/goharbor/harbor/src/common/models" 
"github.com/goharbor/harbor/src/common/utils/log" - - "encoding/json" - "fmt" "time" ) diff --git a/src/common/dao/testutils.go b/src/common/dao/testutils.go index 95d6c3ab7..910d5af72 100644 --- a/src/common/dao/testutils.go +++ b/src/common/dao/testutils.go @@ -120,6 +120,19 @@ func PrepareTestData(clearSqls []string, initSqls []string) { } } +// ExecuteBatchSQL ... +func ExecuteBatchSQL(sqls []string) { + o := GetOrmer() + + for _, sql := range sqls { + fmt.Printf("Exec sql:%v\n", sql) + _, err := o.Raw(sql).Exec() + if err != nil { + fmt.Printf("failed to execute batch sql, sql:%v, error: %v", sql, err) + } + } +} + // ArrayEqual ... func ArrayEqual(arrayA, arrayB []int) bool { if len(arrayA) != len(arrayB) { diff --git a/src/common/dao/user.go b/src/common/dao/user.go index 9349c3477..04e79d066 100644 --- a/src/common/dao/user.go +++ b/src/common/dao/user.go @@ -234,6 +234,14 @@ func OnBoardUser(u *models.User) error { } if created { u.UserID = int(id) + // current orm framework doesn't support to fetch a pointer or sql.NullString with QueryRow + // https://github.com/astaxie/beego/issues/3767 + if len(u.Email) == 0 { + _, err = o.Raw("update harbor_user set email = null where user_id = ? 
", id).Exec() + if err != nil { + return err + } + } } else { existing, err := GetUser(*u) if err != nil { diff --git a/src/common/dao/user_test.go b/src/common/dao/user_test.go index ff48b27ec..2b3029c17 100644 --- a/src/common/dao/user_test.go +++ b/src/common/dao/user_test.go @@ -90,3 +90,23 @@ func TestOnBoardUser(t *testing.T) { assert.True(u.UserID == id) CleanUser(int64(id)) } +func TestOnBoardUser_EmptyEmail(t *testing.T) { + assert := assert.New(t) + u := &models.User{ + Username: "empty_email", + Password: "password1", + Realname: "empty_email", + } + err := OnBoardUser(u) + assert.Nil(err) + id := u.UserID + assert.True(id > 0) + err = OnBoardUser(u) + assert.Nil(err) + assert.True(u.UserID == id) + assert.Equal("", u.Email) + + user, err := GetUser(models.User{Username: "empty_email"}) + assert.Equal("", user.Email) + CleanUser(int64(id)) +} diff --git a/src/common/dao/utils.go b/src/common/dao/utils.go new file mode 100644 index 000000000..489f43e45 --- /dev/null +++ b/src/common/dao/utils.go @@ -0,0 +1,11 @@ +package dao + +import ( + "fmt" + "strings" +) + +// JoinNumberConditions - To join number condition into string,used in sql query +func JoinNumberConditions(ids []int) string { + return strings.Trim(strings.Replace(fmt.Sprint(ids), " ", ",", -1), "[]") +} diff --git a/src/common/dao/utils_test.go b/src/common/dao/utils_test.go new file mode 100644 index 000000000..78f2f4a3a --- /dev/null +++ b/src/common/dao/utils_test.go @@ -0,0 +1,24 @@ +package dao + +import "testing" + +func TestJoinNumberConditions(t *testing.T) { + type args struct { + ids []int + } + tests := []struct { + name string + args args + want string + }{ + {name: "normal test", args: args{[]int{1, 2, 3}}, want: "1,2,3"}, + {name: "dummy test", args: args{[]int{}}, want: ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := JoinNumberConditions(tt.args.ids); got != tt.want { + t.Errorf("JoinNumberConditions() = %v, want %v", got, tt.want) + } + 
}) + } +} diff --git a/src/common/http/client.go b/src/common/http/client.go index 533212dc0..7699e33f2 100644 --- a/src/common/http/client.go +++ b/src/common/http/client.go @@ -16,6 +16,7 @@ package http import ( "bytes" + "crypto/tls" "encoding/json" "errors" "io" @@ -35,6 +36,36 @@ type Client struct { client *http.Client } +var defaultHTTPTransport, secureHTTPTransport, insecureHTTPTransport *http.Transport + +func init() { + defaultHTTPTransport = &http.Transport{} + + secureHTTPTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: false, + }, + } + insecureHTTPTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } +} + +// GetHTTPTransport returns HttpTransport based on insecure configuration +func GetHTTPTransport(insecure ...bool) *http.Transport { + if len(insecure) == 0 { + return defaultHTTPTransport + } + if insecure[0] { + return insecureHTTPTransport + } + return secureHTTPTransport +} + // NewClient creates an instance of Client. // Use net/http.Client as the default value if c is nil. // Modifiers modify the request before sending it. 
diff --git a/src/common/http/client_test.go b/src/common/http/client_test.go new file mode 100644 index 000000000..09f576c97 --- /dev/null +++ b/src/common/http/client_test.go @@ -0,0 +1,14 @@ +package http + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetHTTPTransport(t *testing.T) { + transport := GetHTTPTransport(true) + assert.True(t, transport.TLSClientConfig.InsecureSkipVerify) + transport = GetHTTPTransport(false) + assert.False(t, transport.TLSClientConfig.InsecureSkipVerify) +} diff --git a/src/common/job/client.go b/src/common/job/client.go index 51ce18301..01f3c18e2 100644 --- a/src/common/job/client.go +++ b/src/common/job/client.go @@ -11,9 +11,16 @@ import ( commonhttp "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/http/modifier/auth" "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/jobservice/job" ) +var ( + // GlobalClient is an instance of the default client that can be used globally + // Notes: the client needs to be initialized before can be used + GlobalClient Client +) + // Client wraps interface to access jobservice. type Client interface { SubmitJob(*models.JobData) (string, error) @@ -29,6 +36,11 @@ type DefaultClient struct { client *commonhttp.Client } +// Init the GlobalClient +func Init() { + GlobalClient = NewDefaultClient(config.InternalJobServiceURL(), config.CoreSecret()) +} + // NewDefaultClient creates a default client based on endpoint and secret. func NewDefaultClient(endpoint, secret string) *DefaultClient { var c *commonhttp.Client diff --git a/src/common/models/artifact.go b/src/common/models/artifact.go new file mode 100644 index 000000000..fa6760702 --- /dev/null +++ b/src/common/models/artifact.go @@ -0,0 +1,32 @@ +package models + +import ( + "time" +) + +// Artifact holds the details of a artifact. 
+type Artifact struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + PID int64 `orm:"column(project_id)" json:"project_id"` + Repo string `orm:"column(repo)" json:"repo"` + Tag string `orm:"column(tag)" json:"tag"` + Digest string `orm:"column(digest)" json:"digest"` + Kind string `orm:"column(kind)" json:"kind"` + PushTime time.Time `orm:"column(push_time)" json:"push_time"` + PullTime time.Time `orm:"column(pull_time)" json:"pull_time"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` +} + +// TableName ... +func (af *Artifact) TableName() string { + return "artifact" +} + +// ArtifactQuery ... +type ArtifactQuery struct { + PID int64 + Repo string + Tag string + Digest string + Pagination +} diff --git a/src/common/models/artifact_blob.go b/src/common/models/artifact_blob.go new file mode 100644 index 000000000..a402306ee --- /dev/null +++ b/src/common/models/artifact_blob.go @@ -0,0 +1,18 @@ +package models + +import ( + "time" +) + +// ArtifactAndBlob holds the relationship between manifest and blob. +type ArtifactAndBlob struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + DigestAF string `orm:"column(digest_af)" json:"digest_af"` + DigestBlob string `orm:"column(digest_blob)" json:"digest_blob"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` +} + +// TableName ... 
+func (afb *ArtifactAndBlob) TableName() string { + return "artifact_blob" +} diff --git a/src/common/models/base.go b/src/common/models/base.go index be8877cb8..de04d0285 100644 --- a/src/common/models/base.go +++ b/src/common/models/base.go @@ -36,5 +36,15 @@ func init() { new(AdminJob), new(JobLog), new(Robot), - new(OIDCUser)) + new(OIDCUser), + new(NotificationPolicy), + new(NotificationJob), + new(Blob), + new(ProjectBlob), + new(Artifact), + new(ArtifactAndBlob), + new(CVEWhitelist), + new(Quota), + new(QuotaUsage), + ) } diff --git a/src/common/models/blob.go b/src/common/models/blob.go new file mode 100644 index 000000000..71a3c9b67 --- /dev/null +++ b/src/common/models/blob.go @@ -0,0 +1,19 @@ +package models + +import ( + "time" +) + +// Blob holds the details of a blob. +type Blob struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + Digest string `orm:"column(digest)" json:"digest"` + ContentType string `orm:"column(content_type)" json:"content_type"` + Size int64 `orm:"column(size)" json:"size"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` +} + +// TableName ... +func (b *Blob) TableName() string { + return "blob" +} diff --git a/src/common/models/config.go b/src/common/models/config.go index cbcb3f810..dfd13d4bb 100644 --- a/src/common/models/config.go +++ b/src/common/models/config.go @@ -45,12 +45,14 @@ type SQLite struct { // PostGreSQL ... type PostGreSQL struct { - Host string `json:"host"` - Port int `json:"port"` - Username string `json:"username"` - Password string `json:"password,omitempty"` - Database string `json:"database"` - SSLMode string `json:"sslmode"` + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + Database string `json:"database"` + SSLMode string `json:"sslmode"` + MaxIdleConns int `json:"max_idle_conns"` + MaxOpenConns int `json:"max_open_conns"` } // Email ... 
@@ -70,7 +72,7 @@ type HTTPAuthProxy struct { Endpoint string `json:"endpoint"` TokenReviewEndpoint string `json:"tokenreivew_endpoint"` VerifyCert bool `json:"verify_cert"` - AlwaysOnBoard bool `json:"always_onboard"` + SkipSearch bool `json:"skip_search"` } // OIDCSetting wraps the settings for OIDC auth endpoint @@ -84,6 +86,12 @@ type OIDCSetting struct { Scope []string `json:"scope"` } +// QuotaSetting wraps the settings for Quota +type QuotaSetting struct { + CountPerProject int64 `json:"count_per_project"` + StoragePerProject int64 `json:"storage_per_project"` +} + // ConfigEntry ... type ConfigEntry struct { ID int64 `orm:"pk;auto;column(id)" json:"-"` diff --git a/src/common/models/cve_whitelist.go b/src/common/models/cve_whitelist.go new file mode 100644 index 000000000..90badb372 --- /dev/null +++ b/src/common/models/cve_whitelist.go @@ -0,0 +1,55 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package models + +import "time" + +// CVEWhitelist defines the data model for a CVE whitelist +type CVEWhitelist struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + ProjectID int64 `orm:"column(project_id)" json:"project_id"` + ExpiresAt *int64 `orm:"column(expires_at)" json:"expires_at,omitempty"` + Items []CVEWhitelistItem `orm:"-" json:"items"` + ItemsText string `orm:"column(items)" json:"-"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` + UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` +} + +// CVEWhitelistItem defines one item in the CVE whitelist +type CVEWhitelistItem struct { + CVEID string `json:"cve_id"` +} + +// TableName ... +func (c *CVEWhitelist) TableName() string { + return "cve_whitelist" +} + +// CVESet returns the set of CVE id of the items in the whitelist to help filter the vulnerability list +func (c *CVEWhitelist) CVESet() map[string]struct{} { + r := map[string]struct{}{} + for _, it := range c.Items { + r[it.CVEID] = struct{}{} + } + return r +} + +// IsExpired returns whether the whitelist is expired +func (c *CVEWhitelist) IsExpired() bool { + if c.ExpiresAt == nil { + return false + } + return time.Now().Unix() >= *c.ExpiresAt +} diff --git a/src/common/models/cve_whitelist_test.go b/src/common/models/cve_whitelist_test.go new file mode 100644 index 000000000..cb47e7021 --- /dev/null +++ b/src/common/models/cve_whitelist_test.go @@ -0,0 +1,72 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package models + +import ( + "github.com/stretchr/testify/assert" + "reflect" + "testing" + "time" +) + +func TestCVEWhitelist_All(t *testing.T) { + future := int64(4411494000) + now := time.Now().Unix() + cases := []struct { + input CVEWhitelist + cveset map[string]struct{} + expired bool + }{ + { + input: CVEWhitelist{ + ID: 1, + ProjectID: 0, + Items: []CVEWhitelistItem{}, + }, + cveset: map[string]struct{}{}, + expired: false, + }, + { + input: CVEWhitelist{ + ID: 1, + ProjectID: 0, + Items: []CVEWhitelistItem{}, + ExpiresAt: &now, + }, + cveset: map[string]struct{}{}, + expired: true, + }, + { + input: CVEWhitelist{ + ID: 2, + ProjectID: 3, + Items: []CVEWhitelistItem{ + {CVEID: "CVE-1999-0067"}, + {CVEID: "CVE-2016-7654321"}, + }, + ExpiresAt: &future, + }, + cveset: map[string]struct{}{ + "CVE-1999-0067": {}, + "CVE-2016-7654321": {}, + }, + expired: false, + }, + } + for _, c := range cases { + assert.Equal(t, c.expired, c.input.IsExpired()) + assert.True(t, reflect.DeepEqual(c.cveset, c.input.CVESet())) + } +} diff --git a/src/common/models/hook_notification.go b/src/common/models/hook_notification.go new file mode 100755 index 000000000..60c667afd --- /dev/null +++ b/src/common/models/hook_notification.go @@ -0,0 +1,111 @@ +package models + +import ( + "encoding/json" + "time" +) + +const ( + // NotificationPolicyTable is table name for notification policies + NotificationPolicyTable = "notification_policy" + // NotificationJobTable is table name for notification job + NotificationJobTable = "notification_job" +) + +// NotificationPolicy is the model for a notification policy. 
+type NotificationPolicy struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + Name string `orm:"column(name)" json:"name"` + Description string `orm:"column(description)" json:"description"` + ProjectID int64 `orm:"column(project_id)" json:"project_id"` + TargetsDB string `orm:"column(targets)" json:"-"` + Targets []EventTarget `orm:"-" json:"targets"` + EventTypesDB string `orm:"column(event_types)" json:"-"` + EventTypes []string `orm:"-" json:"event_types"` + Creator string `orm:"column(creator)" json:"creator"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` + UpdateTime time.Time `orm:"column(update_time);auto_now_add" json:"update_time"` + Enabled bool `orm:"column(enabled)" json:"enabled"` +} + +// TableName set table name for ORM. +func (w *NotificationPolicy) TableName() string { + return NotificationPolicyTable +} + +// ConvertToDBModel convert struct data in notification policy to DB model data +func (w *NotificationPolicy) ConvertToDBModel() error { + if len(w.Targets) != 0 { + targets, err := json.Marshal(w.Targets) + if err != nil { + return err + } + w.TargetsDB = string(targets) + } + if len(w.EventTypes) != 0 { + eventTypes, err := json.Marshal(w.EventTypes) + if err != nil { + return err + } + w.EventTypesDB = string(eventTypes) + } + + return nil +} + +// ConvertFromDBModel convert from DB model data to struct data +func (w *NotificationPolicy) ConvertFromDBModel() error { + targets := []EventTarget{} + if len(w.TargetsDB) != 0 { + err := json.Unmarshal([]byte(w.TargetsDB), &targets) + if err != nil { + return err + } + } + w.Targets = targets + + types := []string{} + if len(w.EventTypesDB) != 0 { + err := json.Unmarshal([]byte(w.EventTypesDB), &types) + if err != nil { + return err + } + } + w.EventTypes = types + + return nil +} + +// NotificationJob is the model for a notification job +type NotificationJob struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + PolicyID int64 
`orm:"column(policy_id)" json:"policy_id"` + EventType string `orm:"column(event_type)" json:"event_type"` + NotifyType string `orm:"column(notify_type)" json:"notify_type"` + Status string `orm:"column(status)" json:"status"` + JobDetail string `orm:"column(job_detail)" json:"job_detail"` + UUID string `orm:"column(job_uuid)" json:"-"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` + UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` +} + +// TableName set table name for ORM. +func (w *NotificationJob) TableName() string { + return NotificationJobTable +} + +// NotificationJobQuery holds query conditions for notification job +type NotificationJobQuery struct { + PolicyID int64 + Statuses []string + EventTypes []string + Pagination +} + +// EventTarget defines the structure of target a notification send to +type EventTarget struct { + Type string `json:"type"` + Address string `json:"address"` + AuthHeader string `json:"auth_header,omitempty"` + SkipCertVerify bool `json:"skip_cert_verify"` +} diff --git a/src/common/models/hook_notification_test.go b/src/common/models/hook_notification_test.go new file mode 100644 index 000000000..31c18c8b6 --- /dev/null +++ b/src/common/models/hook_notification_test.go @@ -0,0 +1,114 @@ +package models + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNotificationPolicy_ConvertFromDBModel(t *testing.T) { + tests := []struct { + name string + policy *NotificationPolicy + want *NotificationPolicy + wantErr bool + }{ + { + name: "ConvertFromDBModel want error 1", + policy: &NotificationPolicy{ + TargetsDB: "[{{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\"}]", + }, + wantErr: true, + }, + { + name: "ConvertFromDBModel want error 2", + policy: &NotificationPolicy{ + EventTypesDB: "[{\"pushImage\",\"pullImage\",\"deleteImage\"]", + }, + wantErr: true, + }, + { + name: "ConvertFromDBModel 
1", + policy: &NotificationPolicy{ + TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\"}]", + EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\"]", + }, + want: &NotificationPolicy{ + Targets: []EventTarget{ + { + Type: "http", + Address: "http://10.173.32.58:9009", + }, + }, + EventTypes: []string{"pushImage", "pullImage", "deleteImage"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.policy.ConvertFromDBModel() + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, tt.want.Targets, tt.policy.Targets) + assert.Equal(t, tt.want.EventTypes, tt.policy.EventTypes) + }) + } +} + +func TestNotificationPolicy_ConvertToDBModel(t *testing.T) { + tests := []struct { + name string + policy *NotificationPolicy + want *NotificationPolicy + wantErr bool + }{ + { + name: "ConvertToDBModel 1", + policy: &NotificationPolicy{ + Targets: []EventTarget{ + { + Type: "http", + Address: "http://127.0.0.1", + SkipCertVerify: false, + }, + }, + EventTypes: []string{"pushImage", "pullImage", "deleteImage"}, + }, + want: &NotificationPolicy{ + TargetsDB: "[{\"type\":\"http\",\"address\":\"http://127.0.0.1\",\"skip_cert_verify\":false}]", + EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\"]", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.policy.ConvertToDBModel() + if tt.wantErr { + require.NotNil(t, err, "wantErr: %s", err) + return + } + require.Nil(t, err) + assert.Equal(t, tt.want.TargetsDB, tt.policy.TargetsDB) + assert.Equal(t, tt.want.EventTypesDB, tt.policy.EventTypesDB) + }) + } +} + +func TestNotificationJob_TableName(t *testing.T) { + job := &NotificationJob{} + got := job.TableName() + assert.Equal(t, NotificationJobTable, got) +} + +func TestNotificationPolicy_TableName(t *testing.T) { + policy := &NotificationPolicy{} + got := policy.TableName() + assert.Equal(t, 
NotificationPolicyTable, got) + +} diff --git a/src/common/models/pro_meta.go b/src/common/models/pro_meta.go index 97427ac6a..d9952714c 100644 --- a/src/common/models/pro_meta.go +++ b/src/common/models/pro_meta.go @@ -20,16 +20,17 @@ import ( // keys of project metadata and severity values const ( - ProMetaPublic = "public" - ProMetaEnableContentTrust = "enable_content_trust" - ProMetaPreventVul = "prevent_vul" // prevent vulnerable images from being pulled - ProMetaSeverity = "severity" - ProMetaAutoScan = "auto_scan" - SeverityNone = "negligible" - SeverityLow = "low" - SeverityMedium = "medium" - SeverityHigh = "high" - SeverityCritical = "critical" + ProMetaPublic = "public" + ProMetaEnableContentTrust = "enable_content_trust" + ProMetaPreventVul = "prevent_vul" // prevent vulnerable images from being pulled + ProMetaSeverity = "severity" + ProMetaAutoScan = "auto_scan" + ProMetaReuseSysCVEWhitelist = "reuse_sys_cve_whitelist" + SeverityNone = "negligible" + SeverityLow = "low" + SeverityMedium = "medium" + SeverityHigh = "high" + SeverityCritical = "critical" ) // ProjectMetadata holds the metadata of a project. diff --git a/src/common/models/project.go b/src/common/models/project.go index bebadcdd1..1b56284a3 100644 --- a/src/common/models/project.go +++ b/src/common/models/project.go @@ -17,10 +17,18 @@ package models import ( "strings" "time" + + "github.com/goharbor/harbor/src/pkg/types" ) -// ProjectTable is the table name for project -const ProjectTable = "project" +const ( + // ProjectTable is the table name for project + ProjectTable = "project" + // ProjectPublic means project is public + ProjectPublic = "public" + // ProjectPrivate means project is private + ProjectPrivate = "private" +) // Project holds the details of a project. 
type Project struct { @@ -36,6 +44,7 @@ type Project struct { RepoCount int64 `orm:"-" json:"repo_count"` ChartCount uint64 `orm:"-" json:"chart_count"` Metadata map[string]string `orm:"-" json:"metadata"` + CVEWhitelist CVEWhitelist `orm:"-" json:"cve_whitelist"` } // GetMetadata ... @@ -83,6 +92,15 @@ func (p *Project) VulPrevented() bool { return isTrue(prevent) } +// ReuseSysCVEWhitelist ... +func (p *Project) ReuseSysCVEWhitelist() bool { + r, ok := p.GetMetadata(ProMetaReuseSysCVEWhitelist) + if !ok { + return true + } + return isTrue(r) +} + // Severity ... func (p *Project) Severity() string { severity, exist := p.GetMetadata(ProMetaSeverity) @@ -128,9 +146,9 @@ type ProjectQueryParam struct { // MemberQuery filter by member's username and role type MemberQuery struct { - Name string // the username of member - Role int // the role of the member has to the project - GroupList []*UserGroup // the group list of current user + Name string // the username of member + Role int // the role of the member has to the project + GroupIDs []int // the group ID of current user belongs to } // Pagination ... @@ -154,9 +172,13 @@ type BaseProjectCollection struct { // ProjectRequest holds informations that need for creating project API type ProjectRequest struct { - Name string `json:"project_name"` - Public *int `json:"public"` // deprecated, reserved for project creation in replication - Metadata map[string]string `json:"metadata"` + Name string `json:"project_name"` + Public *int `json:"public"` // deprecated, reserved for project creation in replication + Metadata map[string]string `json:"metadata"` + CVEWhitelist CVEWhitelist `json:"cve_whitelist"` + + CountLimit *int64 `json:"count_limit,omitempty"` + StorageLimit *int64 `json:"storage_limit,omitempty"` } // ProjectQueryResult ... @@ -169,3 +191,19 @@ type ProjectQueryResult struct { func (p *Project) TableName() string { return ProjectTable } + +// ProjectSummary ... 
+type ProjectSummary struct { + RepoCount int64 `json:"repo_count"` + ChartCount uint64 `json:"chart_count"` + + ProjectAdminCount int64 `json:"project_admin_count"` + MasterCount int64 `json:"master_count"` + DeveloperCount int64 `json:"developer_count"` + GuestCount int64 `json:"guest_count"` + + Quota struct { + Hard types.ResourceList `json:"hard"` + Used types.ResourceList `json:"used"` + } `json:"quota"` +} diff --git a/src/common/models/project_blob.go b/src/common/models/project_blob.go new file mode 100644 index 000000000..119dadbc0 --- /dev/null +++ b/src/common/models/project_blob.go @@ -0,0 +1,32 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package models + +import ( + "time" +) + +// ProjectBlob holds the relationship between manifest and blob. +type ProjectBlob struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + ProjectID int64 `orm:"column(project_id)" json:"project_id"` + BlobID int64 `orm:"column(blob_id)" json:"blob_id"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` +} + +// TableName ... 
+func (*ProjectBlob) TableName() string { + return "project_blob" +} diff --git a/src/common/models/quota.go b/src/common/models/quota.go new file mode 100644 index 000000000..e7d8ade6e --- /dev/null +++ b/src/common/models/quota.go @@ -0,0 +1,85 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package models + +import ( + "encoding/json" + "time" + + "github.com/goharbor/harbor/src/pkg/types" +) + +// QuotaHard a map for the quota hard +type QuotaHard map[string]int64 + +func (h QuotaHard) String() string { + bytes, _ := json.Marshal(h) + return string(bytes) +} + +// Copy returns copied quota hard +func (h QuotaHard) Copy() QuotaHard { + hard := QuotaHard{} + for key, value := range h { + hard[key] = value + } + + return hard +} + +// Quota model for quota +type Quota struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + Reference string `orm:"column(reference)" json:"reference"` // The reference type for quota, eg: project, user + ReferenceID string `orm:"column(reference_id)" json:"reference_id"` + Hard string `orm:"column(hard);type(jsonb)" json:"-"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` + UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` +} + +// TableName returns table name for orm +func (q *Quota) TableName() string { + return "quota" +} + +// GetHard returns quota hard +func (q *Quota) GetHard() (QuotaHard, error) { + var 
hard QuotaHard + if err := json.Unmarshal([]byte(q.Hard), &hard); err != nil { + return nil, err + } + + return hard, nil +} + +// SetHard set new quota hard +func (q *Quota) SetHard(hard QuotaHard) { + q.Hard = hard.String() +} + +// QuotaQuery query parameters for quota +type QuotaQuery struct { + ID int64 + Reference string + ReferenceID string + ReferenceIDs []string + Pagination + Sorting +} + +// QuotaUpdateRequest the request for quota update +type QuotaUpdateRequest struct { + Hard types.ResourceList `json:"hard"` +} diff --git a/src/common/models/quota_usage.go b/src/common/models/quota_usage.go new file mode 100644 index 000000000..c5c24eeb3 --- /dev/null +++ b/src/common/models/quota_usage.go @@ -0,0 +1,77 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package models + +import ( + "encoding/json" + "time" +) + +// QuotaUsed a map for the quota used +type QuotaUsed map[string]int64 + +func (u QuotaUsed) String() string { + bytes, _ := json.Marshal(u) + return string(bytes) +} + +// Copy returns copied quota used +func (u QuotaUsed) Copy() QuotaUsed { + used := QuotaUsed{} + for key, value := range u { + used[key] = value + } + + return used +} + +// QuotaUsage model for quota usage +type QuotaUsage struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + Reference string `orm:"column(reference)" json:"reference"` // The reference type for quota usage, eg: project, user + ReferenceID string `orm:"column(reference_id)" json:"reference_id"` + Used string `orm:"column(used);type(jsonb)" json:"-"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` + UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` +} + +// TableName returns table name for orm +func (qu *QuotaUsage) TableName() string { + return "quota_usage" +} + +// GetUsed returns quota used +func (qu *QuotaUsage) GetUsed() (QuotaUsed, error) { + var used QuotaUsed + if err := json.Unmarshal([]byte(qu.Used), &used); err != nil { + return nil, err + } + + return used, nil +} + +// SetUsed set quota used +func (qu *QuotaUsage) SetUsed(used QuotaUsed) { + qu.Used = used.String() +} + +// QuotaUsageQuery query parameters for quota +type QuotaUsageQuery struct { + Reference string + ReferenceID string + ReferenceIDs []string + Pagination + Sorting +} diff --git a/src/common/models/repo.go b/src/common/models/repo.go index 92a51d375..9993fbcc6 100644 --- a/src/common/models/repo.go +++ b/src/common/models/repo.go @@ -16,6 +16,9 @@ package models import ( "time" + + "github.com/goharbor/harbor/src/common/utils/notary/model" + "github.com/theupdateframework/notary/tuf/data" ) // RepoTable is the table name for repository @@ -47,3 +50,38 @@ type RepositoryQuery struct { Pagination Sorting } + +// TagResp 
holds the information of one image tag +type TagResp struct { + TagDetail + Signature *model.Target `json:"signature"` + ScanOverview *ImgScanOverview `json:"scan_overview,omitempty"` + Labels []*Label `json:"labels"` + PushTime time.Time `json:"push_time"` + PullTime time.Time `json:"pull_time"` +} + +// TagDetail ... +type TagDetail struct { + Digest string `json:"digest"` + Name string `json:"name"` + Size int64 `json:"size"` + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version"` + DockerVersion string `json:"docker_version"` + Author string `json:"author"` + Created time.Time `json:"created"` + Config *TagCfg `json:"config"` +} + +// TagCfg ... +type TagCfg struct { + Labels map[string]string `json:"labels"` +} + +// Signature ... +type Signature struct { + Tag string `json:"tag"` + Hashes data.Hashes `json:"hashes"` +} diff --git a/src/common/models/robot.go b/src/common/models/robot.go index b4bb119b2..2e64ca8d2 100644 --- a/src/common/models/robot.go +++ b/src/common/models/robot.go @@ -65,7 +65,6 @@ func (rq *RobotReq) Valid(v *validation.Validation) { // RobotRep ... type RobotRep struct { - ID int64 `json:"id"` Name string `json:"name"` Token string `json:"token"` } diff --git a/src/common/models/scan_job.go b/src/common/models/scan_job.go index 8a26fd741..75546223d 100644 --- a/src/common/models/scan_job.go +++ b/src/common/models/scan_job.go @@ -34,31 +34,6 @@ type ScanJob struct { UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` } -// Severity represents the severity of a image/component in terms of vulnerability. -type Severity int64 - -// Sevxxx is the list of severity of image after scanning. 
-const ( - _ Severity = iota - SevNone - SevUnknown - SevLow - SevMedium - SevHigh -) - -// String is the output function for sererity variable -func (sev Severity) String() string { - name := []string{"negligible", "unknown", "low", "medium", "high"} - i := int64(sev) - switch { - case i >= 1 && i <= int64(SevHigh): - return name[i-1] - default: - return "unknown" - } -} - // TableName is required by by beego orm to map ScanJob to table img_scan_job func (s *ScanJob) TableName() string { return ScanJobTable @@ -101,17 +76,6 @@ type ImageScanReq struct { Tag string `json:"tag"` } -// VulnerabilityItem is an item in the vulnerability result returned by vulnerability details API. -type VulnerabilityItem struct { - ID string `json:"id"` - Severity Severity `json:"severity"` - Pkg string `json:"package"` - Version string `json:"version"` - Description string `json:"description"` - Link string `json:"link"` - Fixed string `json:"fixedVersion,omitempty"` -} - // ScanAllPolicy is represent the json request and object for scan all policy, the parm is het type ScanAllPolicy struct { Type string `json:"type"` diff --git a/src/common/models/sev.go b/src/common/models/sev.go new file mode 100644 index 000000000..3ccf89753 --- /dev/null +++ b/src/common/models/sev.go @@ -0,0 +1,26 @@ +package models + +// Severity represents the severity of a image/component in terms of vulnerability. +type Severity int64 + +// Sevxxx is the list of severity of image after scanning. 
+const ( + _ Severity = iota + SevNone + SevUnknown + SevLow + SevMedium + SevHigh +) + +// String is the output function for severity variable +func (sev Severity) String() string { + name := []string{"negligible", "unknown", "low", "medium", "high"} + i := int64(sev) + switch { + case i >= 1 && i <= int64(SevHigh): + return name[i-1] + default: + return "unknown" + } +} diff --git a/src/common/models/token.go b/src/common/models/token.go index f5bbd797b..ac50fba42 100644 --- a/src/common/models/token.go +++ b/src/common/models/token.go @@ -16,9 +16,19 @@ package models // Token represents the json returned by registry token service type Token struct { - Token string `json:"token"` - ExpiresIn int `json:"expires_in"` - IssuedAt string `json:"issued_at"` + Token string `json:"token"` + AccessToken string `json:"access_token"` // the token returned by azure container registry is called "access_token" + ExpiresIn int `json:"expires_in"` + IssuedAt string `json:"issued_at"` +} + +// GetToken returns the content of the token +func (t *Token) GetToken() string { + token := t.Token + if len(token) == 0 { + token = t.AccessToken + } + return token } // ResourceActions ... diff --git a/src/common/models/user.go b/src/common/models/user.go index 9b224bd80..77fac1a83 100644 --- a/src/common/models/user.go +++ b/src/common/models/user.go @@ -35,13 +35,13 @@ type User struct { // to it. 
Role int `orm:"-" json:"role_id"` // RoleList []Role `json:"role_list"` - HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"` - ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"` - Salt string `orm:"column(salt)" json:"-"` - CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` - UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` - GroupList []*UserGroup `orm:"-" json:"-"` - OIDCUserMeta *OIDCUser `orm:"-" json:"oidc_user_meta,omitempty"` + HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"` + ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"` + Salt string `orm:"column(salt)" json:"-"` + CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"` + UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` + GroupIDs []int `orm:"-" json:"-"` + OIDCUserMeta *OIDCUser `orm:"-" json:"oidc_user_meta,omitempty"` } // UserQuery ... diff --git a/src/common/quota/driver/driver.go b/src/common/quota/driver/driver.go new file mode 100644 index 000000000..fbd339e37 --- /dev/null +++ b/src/common/quota/driver/driver.go @@ -0,0 +1,59 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package driver + +import ( + "sync" + + "github.com/goharbor/harbor/src/pkg/types" +) + +var ( + driversMu sync.RWMutex + drivers = map[string]Driver{} +) + +// RefObject type for quota ref object +type RefObject map[string]interface{} + +// Driver the driver for quota +type Driver interface { + // HardLimits returns default resource list + HardLimits() types.ResourceList + // Load returns quota ref object by key + Load(key string) (RefObject, error) + // Validate validate the hard limits + Validate(hardLimits types.ResourceList) error +} + +// Register register quota driver +func Register(name string, driver Driver) { + driversMu.Lock() + defer driversMu.Unlock() + if driver == nil { + panic("quota: Register driver is nil") + } + + drivers[name] = driver +} + +// Get returns quota driver by name +func Get(name string) (Driver, bool) { + driversMu.Lock() + defer driversMu.Unlock() + + driver, ok := drivers[name] + return driver, ok +} diff --git a/src/common/quota/driver/mocks/driver.go b/src/common/quota/driver/mocks/driver.go new file mode 100644 index 000000000..8f8c1ac82 --- /dev/null +++ b/src/common/quota/driver/mocks/driver.go @@ -0,0 +1,65 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import driver "github.com/goharbor/harbor/src/common/quota/driver" +import mock "github.com/stretchr/testify/mock" +import types "github.com/goharbor/harbor/src/pkg/types" + +// Driver is an autogenerated mock type for the Driver type +type Driver struct { + mock.Mock +} + +// HardLimits provides a mock function with given fields: +func (_m *Driver) HardLimits() types.ResourceList { + ret := _m.Called() + + var r0 types.ResourceList + if rf, ok := ret.Get(0).(func() types.ResourceList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.ResourceList) + } + } + + return r0 +} + +// Load provides a mock function with given fields: key +func (_m *Driver) Load(key string) (driver.RefObject, error) { + ret := _m.Called(key) + + var r0 driver.RefObject + if rf, ok := ret.Get(0).(func(string) driver.RefObject); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(driver.RefObject) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Validate provides a mock function with given fields: resources +func (_m *Driver) Validate(resources types.ResourceList) error { + ret := _m.Called(resources) + + var r0 error + if rf, ok := ret.Get(0).(func(types.ResourceList) error); ok { + r0 = rf(resources) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/src/common/quota/driver/project/driver.go b/src/common/quota/driver/project/driver.go new file mode 100644 index 000000000..8fafded6c --- /dev/null +++ b/src/common/quota/driver/project/driver.go @@ -0,0 +1,143 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package project + +import ( + "context" + "fmt" + "strconv" + + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/config" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + dr "github.com/goharbor/harbor/src/common/quota/driver" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/graph-gophers/dataloader" +) + +func init() { + dr.Register("project", newDriver()) +} + +func getProjectsBatchFn(ctx context.Context, keys dataloader.Keys) []*dataloader.Result { + handleError := func(err error) []*dataloader.Result { + var results []*dataloader.Result + var result dataloader.Result + result.Error = err + results = append(results, &result) + return results + } + + var projectIDs []int64 + for _, key := range keys { + id, err := strconv.ParseInt(key.String(), 10, 64) + if err != nil { + return handleError(err) + } + projectIDs = append(projectIDs, id) + } + + projects, err := dao.GetProjects(&models.ProjectQueryParam{}) + if err != nil { + return handleError(err) + } + + var projectsMap = make(map[int64]*models.Project, len(projectIDs)) + for _, project := range projects { + projectsMap[project.ProjectID] = project + } + + var results []*dataloader.Result + for _, projectID := range projectIDs { + project, ok := projectsMap[projectID] + if !ok { + return handleError(fmt.Errorf("project not found, "+"project_id: %d", projectID)) + } + + result := dataloader.Result{ + Data: project, + Error: nil, + } + results = append(results, &result) + } + + return results +} + +type 
driver struct { + cfg *config.CfgManager + loader *dataloader.Loader +} + +func (d *driver) HardLimits() types.ResourceList { + return types.ResourceList{ + types.ResourceCount: d.cfg.Get(common.CountPerProject).GetInt64(), + types.ResourceStorage: d.cfg.Get(common.StoragePerProject).GetInt64(), + } +} + +func (d *driver) Load(key string) (dr.RefObject, error) { + thunk := d.loader.Load(context.TODO(), dataloader.StringKey(key)) + + result, err := thunk() + if err != nil { + return nil, err + } + + project, ok := result.(*models.Project) + if !ok { + return nil, fmt.Errorf("bad result for project: %s", key) + } + + return dr.RefObject{ + "id": project.ProjectID, + "name": project.Name, + "owner_name": project.OwnerName, + }, nil +} + +func (d *driver) Validate(hardLimits types.ResourceList) error { + resources := map[types.ResourceName]bool{ + types.ResourceCount: true, + types.ResourceStorage: true, + } + + for resource, value := range hardLimits { + if !resources[resource] { + return fmt.Errorf("resource %s not support", resource) + } + + if value <= 0 && value != types.UNLIMITED { + return fmt.Errorf("invalid value for resource %s", resource) + } + } + + for resource := range resources { + if _, found := hardLimits[resource]; !found { + return fmt.Errorf("resource %s not found", resource) + } + } + + return nil +} + +func newDriver() dr.Driver { + cfg := config.NewDBCfgManager() + + loader := dataloader.NewBatchedLoader(getProjectsBatchFn) + + return &driver{cfg: cfg, loader: loader} +} diff --git a/src/common/quota/driver/project/driver_test.go b/src/common/quota/driver/project/driver_test.go new file mode 100644 index 000000000..992af0ae9 --- /dev/null +++ b/src/common/quota/driver/project/driver_test.go @@ -0,0 +1,77 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package project + +import ( + "os" + "testing" + + "github.com/goharbor/harbor/src/common/dao" + dr "github.com/goharbor/harbor/src/common/quota/driver" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/stretchr/testify/suite" +) + +type DriverSuite struct { + suite.Suite +} + +func (suite *DriverSuite) TestHardLimits() { + driver := newDriver() + + suite.Equal(types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}, driver.HardLimits()) +} + +func (suite *DriverSuite) TestLoad() { + driver := newDriver() + + if ref, err := driver.Load("1"); suite.Nil(err) { + obj := dr.RefObject{ + "id": int64(1), + "name": "library", + "owner_name": "", + } + + suite.Equal(obj, ref) + } + + if ref, err := driver.Load("100000"); suite.Error(err) { + suite.Empty(ref) + } + + if ref, err := driver.Load("library"); suite.Error(err) { + suite.Empty(ref) + } +} + +func (suite *DriverSuite) TestValidate() { + driver := newDriver() + + suite.Nil(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 1024})) + suite.Error(driver.Validate(types.ResourceList{})) + suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1})) + suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 0})) + suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceName("foo"): 1})) +} + +func TestMain(m *testing.M) { + dao.PrepareTestForPostgresSQL() + + os.Exit(m.Run()) +} + +func TestRunDriverSuite(t *testing.T) { + suite.Run(t, new(DriverSuite)) +} diff --git 
a/src/common/quota/errors.go b/src/common/quota/errors.go new file mode 100644 index 000000000..c828734dd --- /dev/null +++ b/src/common/quota/errors.go @@ -0,0 +1,111 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quota + +import ( + "fmt" + "strings" + + "github.com/goharbor/harbor/src/pkg/types" +) + +// Errors contains all happened errors +type Errors []error + +// GetErrors gets all errors that have occurred and returns a slice of errors (Error type) +func (errs Errors) GetErrors() []error { + return errs +} + +// Add adds an error to a given slice of errors +func (errs Errors) Add(newErrors ...error) Errors { + for _, err := range newErrors { + if err == nil { + continue + } + + if errors, ok := err.(Errors); ok { + errs = errs.Add(errors...) + } else { + ok = true + for _, e := range errs { + if err == e { + ok = false + } + } + if ok { + errs = append(errs, err) + } + } + } + + return errs +} + +// Error takes a slice of all errors that have occurred and returns it as a formatted string +func (errs Errors) Error() string { + var errors = []string{} + for _, e := range errs { + errors = append(errors, e.Error()) + } + return strings.Join(errors, "; ") +} + +// ResourceOverflow ... 
+type ResourceOverflow struct { + Resource types.ResourceName + HardLimit int64 + CurrentUsed int64 + NewUsed int64 +} + +func (e *ResourceOverflow) Error() string { + resource := e.Resource + var ( + op string + delta int64 + ) + + if e.NewUsed > e.CurrentUsed { + op = "add" + delta = e.NewUsed - e.CurrentUsed + } else { + op = "subtract" + delta = e.CurrentUsed - e.NewUsed + } + + return fmt.Sprintf("%s %s of %s resource overflow the hard limit, current usage is %s and hard limit is %s", + op, resource.FormatValue(delta), resource, + resource.FormatValue(e.CurrentUsed), resource.FormatValue(e.HardLimit)) +} + +// NewResourceOverflowError ... +func NewResourceOverflowError(resource types.ResourceName, hardLimit, currentUsed, newUsed int64) error { + return &ResourceOverflow{Resource: resource, HardLimit: hardLimit, CurrentUsed: currentUsed, NewUsed: newUsed} +} + +// ResourceNotFound ... +type ResourceNotFound struct { + Resource types.ResourceName +} + +func (e *ResourceNotFound) Error() string { + return fmt.Sprintf("resource %s not found", e.Resource) +} + +// NewResourceNotFoundError ... +func NewResourceNotFoundError(resource types.ResourceName) error { + return &ResourceNotFound{Resource: resource} +} diff --git a/src/common/quota/manager.go b/src/common/quota/manager.go new file mode 100644 index 000000000..a70199ed3 --- /dev/null +++ b/src/common/quota/manager.go @@ -0,0 +1,276 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package quota + +import ( + "fmt" + "time" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota/driver" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/types" +) + +// Manager manager for quota +type Manager struct { + driver driver.Driver + reference string + referenceID string +} + +func (m *Manager) addQuota(o orm.Ormer, hardLimits types.ResourceList, now time.Time) (int64, error) { + quota := &models.Quota{ + Reference: m.reference, + ReferenceID: m.referenceID, + Hard: hardLimits.String(), + CreationTime: now, + UpdateTime: now, + } + + return o.Insert(quota) +} + +func (m *Manager) addUsage(o orm.Ormer, used types.ResourceList, now time.Time, ids ...int64) (int64, error) { + usage := &models.QuotaUsage{ + Reference: m.reference, + ReferenceID: m.referenceID, + Used: used.String(), + CreationTime: now, + UpdateTime: now, + } + + if len(ids) > 0 { + usage.ID = ids[0] + } + + return o.Insert(usage) +} + +func (m *Manager) newQuota(o orm.Ormer, hardLimits types.ResourceList, usages ...types.ResourceList) (int64, error) { + now := time.Now() + + id, err := m.addQuota(o, hardLimits, now) + if err != nil { + return 0, err + } + + var used types.ResourceList + if len(usages) > 0 { + used = usages[0] + } else { + used = types.Zero(hardLimits) + } + + if _, err := m.addUsage(o, used, now, id); err != nil { + return 0, err + } + + return id, nil +} + +func (m *Manager) getQuotaForUpdate(o orm.Ormer) (*models.Quota, error) { + quota := &models.Quota{Reference: m.reference, ReferenceID: m.referenceID} + if err := o.ReadForUpdate(quota, "reference", "reference_id"); err != nil { + if err == orm.ErrNoRows { + if _, err := m.newQuota(o, m.driver.HardLimits()); err != nil { + return nil, err + } + + 
return m.getQuotaForUpdate(o) + } + + return nil, err + } + + return quota, nil +} + +func (m *Manager) getUsageForUpdate(o orm.Ormer) (*models.QuotaUsage, error) { + usage := &models.QuotaUsage{Reference: m.reference, ReferenceID: m.referenceID} + if err := o.ReadForUpdate(usage, "reference", "reference_id"); err != nil { + return nil, err + } + + return usage, nil +} + +func (m *Manager) updateUsage(o orm.Ormer, resources types.ResourceList, + calculate func(types.ResourceList, types.ResourceList) types.ResourceList) error { + + quota, err := m.getQuotaForUpdate(o) + if err != nil { + return err + } + hardLimits, err := types.NewResourceList(quota.Hard) + if err != nil { + return err + } + + usage, err := m.getUsageForUpdate(o) + if err != nil { + return err + } + used, err := types.NewResourceList(usage.Used) + if err != nil { + return err + } + + newUsed := calculate(used, resources) + + // ensure that new used is never negative + if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 { + return fmt.Errorf("quota usage is negative for resource(s): %s", prettyPrintResourceNames(negativeUsed)) + } + + if err := isSafe(hardLimits, used, newUsed); err != nil { + return err + } + + usage.Used = newUsed.String() + usage.UpdateTime = time.Now() + + _, err = o.Update(usage) + return err +} + +// NewQuota create new quota for (reference, reference id) +func (m *Manager) NewQuota(hardLimit types.ResourceList, usages ...types.ResourceList) (int64, error) { + var id int64 + err := dao.WithTransaction(func(o orm.Ormer) (err error) { + id, err = m.newQuota(o, hardLimit, usages...) 
+ return err + }) + + if err != nil { + return 0, err + } + + return id, nil +} + +// DeleteQuota delete the quota +func (m *Manager) DeleteQuota() error { + return dao.WithTransaction(func(o orm.Ormer) error { + quota := &models.Quota{Reference: m.reference, ReferenceID: m.referenceID} + if _, err := o.Delete(quota, "reference", "reference_id"); err != nil { + return err + } + + usage := &models.QuotaUsage{Reference: m.reference, ReferenceID: m.referenceID} + if _, err := o.Delete(usage, "reference", "reference_id"); err != nil { + return err + } + + return nil + }) +} + +// UpdateQuota update the quota resource spec +func (m *Manager) UpdateQuota(hardLimits types.ResourceList) error { + o := dao.GetOrmer() + if err := m.driver.Validate(hardLimits); err != nil { + return err + } + + sql := `UPDATE quota SET hard = ? WHERE reference = ? AND reference_id = ?` + _, err := o.Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec() + + return err +} + +// EnsureQuota ensures the reference has quota and usage, +// if non-existent, will create new quota and usage. +// if existent, update the quota and usage. 
+func (m *Manager) EnsureQuota(usages types.ResourceList) error { + query := &models.QuotaQuery{ + Reference: m.reference, + ReferenceID: m.referenceID, + } + quotas, err := dao.ListQuotas(query) + if err != nil { + return err + } + + // non-existent: create quota and usage + defaultHardLimit := m.driver.HardLimits() + if len(quotas) == 0 { + _, err := m.NewQuota(defaultHardLimit, usages) + if err != nil { + return err + } + return nil + } + + // existent + used := usages + quotaUsed, err := types.NewResourceList(quotas[0].Used) + if err != nil { + return err + } + if types.Equals(quotaUsed, used) { + return nil + } + dao.WithTransaction(func(o orm.Ormer) error { + usage, err := m.getUsageForUpdate(o) + if err != nil { + return err + } + usage.Used = used.String() + usage.UpdateTime = time.Now() + _, err = o.Update(usage) + if err != nil { + return err + } + return nil + }) + + return nil +} + +// AddResources add resources to usage +func (m *Manager) AddResources(resources types.ResourceList) error { + return dao.WithTransaction(func(o orm.Ormer) error { + return m.updateUsage(o, resources, types.Add) + }) +} + +// SubtractResources subtract resources from usage +func (m *Manager) SubtractResources(resources types.ResourceList) error { + return dao.WithTransaction(func(o orm.Ormer) error { + return m.updateUsage(o, resources, types.Subtract) + }) +} + +// NewManager returns quota manager +func NewManager(reference string, referenceID string) (*Manager, error) { + d, ok := driver.Get(reference) + if !ok { + return nil, fmt.Errorf("quota not support for %s", reference) + } + + if _, err := d.Load(referenceID); err != nil { + log.Warning(fmt.Sprintf("Load quota reference object (%s, %s) failed: %v", reference, referenceID, err)) + return nil, err + } + + return &Manager{ + driver: d, + reference: reference, + referenceID: referenceID, + }, nil +} diff --git a/src/common/quota/manager_test.go b/src/common/quota/manager_test.go new file mode 100644 index 
000000000..344d06e47 --- /dev/null +++ b/src/common/quota/manager_test.go @@ -0,0 +1,342 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quota + +import ( + "fmt" + "os" + "sync" + "testing" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota/driver" + "github.com/goharbor/harbor/src/common/quota/driver/mocks" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +var ( + hardLimits = types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: 1000} + reference = "mock" +) + +func init() { + mockDriver := &mocks.Driver{} + + mockHardLimitsFn := func() types.ResourceList { + return types.ResourceList{ + types.ResourceCount: -1, + types.ResourceStorage: -1, + } + } + + mockLoadFn := func(key string) driver.RefObject { + return driver.RefObject{"id": key} + } + + mockDriver.On("HardLimits").Return(mockHardLimitsFn) + mockDriver.On("Load", mock.AnythingOfType("string")).Return(mockLoadFn, nil) + mockDriver.On("Validate", mock.AnythingOfType("types.ResourceList")).Return(nil) + + driver.Register(reference, mockDriver) +} + +func mustResourceList(s string) types.ResourceList { + resources, _ := types.NewResourceList(s) + return resources +} + +type ManagerSuite struct { + suite.Suite +} + +func (suite *ManagerSuite) SetupTest() { + _, ok := 
driver.Get(reference) + if !ok { + suite.Fail("driver not found for %s", reference) + } +} + +func (suite *ManagerSuite) quotaManager(referenceIDs ...string) *Manager { + referenceID := "1" + if len(referenceIDs) > 0 { + referenceID = referenceIDs[0] + } + + mgr, _ := NewManager(reference, referenceID) + return mgr +} + +func (suite *ManagerSuite) TearDownTest() { + dao.ClearTable("quota") + dao.ClearTable("quota_usage") +} + +func (suite *ManagerSuite) TestNewQuota() { + mgr := suite.quotaManager() + + if id, err := mgr.NewQuota(hardLimits); suite.Nil(err) { + quota, _ := dao.GetQuota(id) + suite.Equal(hardLimits, mustResourceList(quota.Hard)) + } + + mgr = suite.quotaManager("2") + used := types.ResourceList{types.ResourceStorage: 100} + if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) { + quota, _ := dao.GetQuota(id) + suite.Equal(hardLimits, mustResourceList(quota.Hard)) + + usage, _ := dao.GetQuotaUsage(id) + suite.Equal(used, mustResourceList(usage.Used)) + } +} + +func (suite *ManagerSuite) TestDeleteQuota() { + mgr := suite.quotaManager() + + id, err := mgr.NewQuota(hardLimits) + if suite.Nil(err) { + quota, _ := dao.GetQuota(id) + suite.Equal(hardLimits, mustResourceList(quota.Hard)) + } + + if err := mgr.DeleteQuota(); suite.Nil(err) { + quota, _ := dao.GetQuota(id) + suite.Nil(quota) + } +} + +func (suite *ManagerSuite) TestUpdateQuota() { + mgr := suite.quotaManager() + + id, _ := mgr.NewQuota(hardLimits) + largeHardLimits := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: 1000000} + + if err := mgr.UpdateQuota(largeHardLimits); suite.Nil(err) { + quota, _ := dao.GetQuota(id) + suite.Equal(largeHardLimits, mustResourceList(quota.Hard)) + } +} + +func (suite *ManagerSuite) TestEnsureQuota() { + // non-existent + nonExistRefID := "3" + mgr := suite.quotaManager(nonExistRefID) + infinite := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1} + usage := types.ResourceList{types.ResourceCount: 10, 
types.ResourceStorage: 10} + err := mgr.EnsureQuota(usage) + suite.Nil(err) + query := &models.QuotaQuery{ + Reference: reference, + ReferenceID: nonExistRefID, + } + quotas, err := dao.ListQuotas(query) + suite.Nil(err) + suite.Equal(usage, mustResourceList(quotas[0].Used)) + suite.Equal(infinite, mustResourceList(quotas[0].Hard)) + + // existent + existRefID := "4" + mgr = suite.quotaManager(existRefID) + used := types.ResourceList{types.ResourceCount: 11, types.ResourceStorage: 11} + if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) { + quota, _ := dao.GetQuota(id) + suite.Equal(hardLimits, mustResourceList(quota.Hard)) + + usage, _ := dao.GetQuotaUsage(id) + suite.Equal(used, mustResourceList(usage.Used)) + } + + usage2 := types.ResourceList{types.ResourceCount: 12, types.ResourceStorage: 12} + err = mgr.EnsureQuota(usage2) + suite.Nil(err) + query2 := &models.QuotaQuery{ + Reference: reference, + ReferenceID: existRefID, + } + quotas2, err := dao.ListQuotas(query2) + suite.Equal(usage2, mustResourceList(quotas2[0].Used)) + suite.Equal(hardLimits, mustResourceList(quotas2[0].Hard)) + +} + +func (suite *ManagerSuite) TestQuotaAutoCreation() { + for i := 0; i < 10; i++ { + mgr := suite.quotaManager(fmt.Sprintf("%d", i)) + resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100} + + suite.Nil(mgr.AddResources(resource)) + } +} + +func (suite *ManagerSuite) TestAddResources() { + mgr := suite.quotaManager() + id, _ := mgr.NewQuota(hardLimits) + + resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100} + + if suite.Nil(mgr.AddResources(resource)) { + usage, _ := dao.GetQuotaUsage(id) + suite.Equal(resource, mustResourceList(usage.Used)) + } + + if suite.Nil(mgr.AddResources(resource)) { + usage, _ := dao.GetQuotaUsage(id) + suite.Equal(types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 200}, mustResourceList(usage.Used)) + } + + if err := 
mgr.AddResources(types.ResourceList{types.ResourceStorage: 10000}); suite.Error(err) { + if errs, ok := err.(Errors); suite.True(ok) { + for _, err := range errs { + suite.IsType(&ResourceOverflow{}, err) + } + } + } +} + +func (suite *ManagerSuite) TestSubtractResources() { + mgr := suite.quotaManager() + id, _ := mgr.NewQuota(hardLimits) + + resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100} + + if suite.Nil(mgr.AddResources(resource)) { + usage, _ := dao.GetQuotaUsage(id) + suite.Equal(resource, mustResourceList(usage.Used)) + } + + if suite.Nil(mgr.SubtractResources(resource)) { + usage, _ := dao.GetQuotaUsage(id) + suite.Equal(types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 0}, mustResourceList(usage.Used)) + } +} + +func (suite *ManagerSuite) TestRaceAddResources() { + mgr := suite.quotaManager() + mgr.NewQuota(hardLimits) + + resources := types.ResourceList{ + types.ResourceStorage: 100, + } + + var wg sync.WaitGroup + + results := make([]bool, 100) + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + results[i] = mgr.AddResources(resources) == nil + }(i) + } + wg.Wait() + + var success int + for _, result := range results { + if result { + success++ + } + } + + suite.Equal(10, success) +} + +func (suite *ManagerSuite) TestRaceSubtractResources() { + mgr := suite.quotaManager() + mgr.NewQuota(hardLimits, types.ResourceList{types.ResourceStorage: 1000}) + + resources := types.ResourceList{ + types.ResourceStorage: 100, + } + + var wg sync.WaitGroup + + results := make([]bool, 100) + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + results[i] = mgr.SubtractResources(resources) == nil + }(i) + } + wg.Wait() + + var success int + for _, result := range results { + if result { + success++ + } + } + + suite.Equal(10, success) +} + +func TestMain(m *testing.M) { + dao.PrepareTestForPostgresSQL() + + if result := m.Run(); result != 0 { + os.Exit(result) + } 
+} + +func TestRunManagerSuite(t *testing.T) { + suite.Run(t, new(ManagerSuite)) +} + +func BenchmarkAddResources(b *testing.B) { + defer func() { + dao.ClearTable("quota") + dao.ClearTable("quota_usage") + }() + + mgr, _ := NewManager(reference, "1") + mgr.NewQuota(types.ResourceList{types.ResourceStorage: int64(b.N)}) + + resource := types.ResourceList{ + types.ResourceStorage: 1, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + mgr.AddResources(resource) + } + b.StopTimer() +} + +func BenchmarkAddResourcesParallel(b *testing.B) { + defer func() { + dao.ClearTable("quota") + dao.ClearTable("quota_usage") + }() + + mgr, _ := NewManager(reference, "1") + mgr.NewQuota(types.ResourceList{}) + + resource := types.ResourceList{ + types.ResourceStorage: 1, + } + + b.ResetTimer() + b.RunParallel(func(b *testing.PB) { + for b.Next() { + mgr.AddResources(resource) + } + }) + b.StopTimer() +} diff --git a/src/replication/adapter/chart_registry.go b/src/common/quota/quota.go similarity index 57% rename from src/replication/adapter/chart_registry.go rename to src/common/quota/quota.go index fef80e18e..4446d61eb 100644 --- a/src/replication/adapter/chart_registry.go +++ b/src/common/quota/quota.go @@ -12,19 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package adapter +package quota import ( - "io" + "fmt" - "github.com/goharbor/harbor/src/replication/model" + "github.com/goharbor/harbor/src/common/quota/driver" + "github.com/goharbor/harbor/src/pkg/types" + + // project driver for quota + _ "github.com/goharbor/harbor/src/common/quota/driver/project" ) -// ChartRegistry defines the capabilities that a chart registry should have -type ChartRegistry interface { - FetchCharts(filters []*model.Filter) ([]*model.Resource, error) - ChartExist(name, version string) (bool, error) - DownloadChart(name, version string) (io.ReadCloser, error) - UploadChart(name, version string, chart io.Reader) error - DeleteChart(name, version string) error +// Validate validate hard limits +func Validate(reference string, hardLimits types.ResourceList) error { + d, ok := driver.Get(reference) + if !ok { + return fmt.Errorf("quota not support for %s", reference) + } + + return d.Validate(hardLimits) } diff --git a/src/common/quota/quota_test.go b/src/common/quota/quota_test.go new file mode 100644 index 000000000..cc089e86c --- /dev/null +++ b/src/common/quota/quota_test.go @@ -0,0 +1,45 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package quota + +import ( + "testing" + + _ "github.com/goharbor/harbor/src/common/quota/driver/project" + "github.com/goharbor/harbor/src/pkg/types" +) + +func TestValidate(t *testing.T) { + type args struct { + reference string + hardLimits types.ResourceList + } + tests := []struct { + name string + args args + wantErr bool + }{ + {"valid", args{"project", types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 1}}, false}, + {"invalid", args{"project", types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 0}}, true}, + {"not support", args{"not support", types.ResourceList{types.ResourceCount: 1}}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := Validate(tt.args.reference, tt.args.hardLimits); (err != nil) != tt.wantErr { + t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/src/common/quota/types.go b/src/common/quota/types.go new file mode 100644 index 000000000..35a6f60cc --- /dev/null +++ b/src/common/quota/types.go @@ -0,0 +1,32 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package quota + +import ( + "github.com/goharbor/harbor/src/pkg/types" +) + +var ( + // ResourceCount alias types.ResourceCount + ResourceCount = types.ResourceCount + // ResourceStorage alias types.ResourceStorage + ResourceStorage = types.ResourceStorage +) + +// ResourceName alias types.ResourceName +type ResourceName = types.ResourceName + +// ResourceList alias types.ResourceList +type ResourceList = types.ResourceList diff --git a/src/common/quota/util.go b/src/common/quota/util.go new file mode 100644 index 000000000..5f8687cc7 --- /dev/null +++ b/src/common/quota/util.go @@ -0,0 +1,57 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package quota + +import ( + "sort" + "strings" + + "github.com/goharbor/harbor/src/pkg/types" +) + +func isSafe(hardLimits types.ResourceList, currentUsed types.ResourceList, newUsed types.ResourceList) error { + var errs Errors + + for resource, value := range newUsed { + hardLimit, found := hardLimits[resource] + if !found { + errs = errs.Add(NewResourceNotFoundError(resource)) + continue + } + + if hardLimit == types.UNLIMITED || value == currentUsed[resource] { + continue + } + + if value > hardLimit { + errs = errs.Add(NewResourceOverflowError(resource, hardLimit, currentUsed[resource], value)) + } + } + + if len(errs) > 0 { + return errs + } + + return nil +} + +func prettyPrintResourceNames(a []types.ResourceName) string { + values := []string{} + for _, value := range a { + values = append(values, string(value)) + } + sort.Strings(values) + return strings.Join(values, ",") +} diff --git a/src/common/quota/util_test.go b/src/common/quota/util_test.go new file mode 100644 index 000000000..d0db1166a --- /dev/null +++ b/src/common/quota/util_test.go @@ -0,0 +1,78 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package quota + +import ( + "testing" + + "github.com/goharbor/harbor/src/pkg/types" +) + +func Test_isSafe(t *testing.T) { + type args struct { + hardLimits types.ResourceList + currentUsed types.ResourceList + newUsed types.ResourceList + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + "unlimited", + args{ + types.ResourceList{types.ResourceStorage: types.UNLIMITED}, + types.ResourceList{types.ResourceStorage: 1000}, + types.ResourceList{types.ResourceStorage: 1000}, + }, + false, + }, + { + "ok", + args{ + types.ResourceList{types.ResourceStorage: 100}, + types.ResourceList{types.ResourceStorage: 10}, + types.ResourceList{types.ResourceStorage: 1}, + }, + false, + }, + { + "over the hard limit", + args{ + types.ResourceList{types.ResourceStorage: 100}, + types.ResourceList{types.ResourceStorage: 0}, + types.ResourceList{types.ResourceStorage: 200}, + }, + true, + }, + { + "hard limit not found", + args{ + types.ResourceList{types.ResourceStorage: 100}, + types.ResourceList{types.ResourceCount: 0}, + types.ResourceList{types.ResourceCount: 1}, + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := isSafe(tt.args.hardLimits, tt.args.currentUsed, tt.args.newUsed); (err != nil) != tt.wantErr { + t.Errorf("isSafe() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/src/common/rbac/const.go b/src/common/rbac/const.go old mode 100644 new mode 100755 index 226759d34..6cadbddef --- a/src/common/rbac/const.go +++ b/src/common/rbac/const.go @@ -27,6 +27,8 @@ const ( ActionUpdate = Action("update") ActionDelete = Action("delete") ActionList = Action("list") + + ActionOperate = Action("operate") ) // const resource variables @@ -46,6 +48,7 @@ const ( ResourceReplicationExecution = Resource("replication-execution") ResourceReplicationTask = Resource("replication-task") ResourceRepository = Resource("repository") + ResourceTagRetention = Resource("tag-retention") 
ResourceRepositoryLabel = Resource("repository-label") ResourceRepositoryTag = Resource("repository-tag") ResourceRepositoryTagLabel = Resource("repository-tag-label") @@ -53,5 +56,6 @@ const ( ResourceRepositoryTagScanJob = Resource("repository-tag-scan-job") ResourceRepositoryTagVulnerability = Resource("repository-tag-vulnerability") ResourceRobot = Resource("robot") + ResourceNotificationPolicy = Resource("notification-policy") ResourceSelf = Resource("") // subresource for self ) diff --git a/src/common/rbac/project/util.go b/src/common/rbac/project/util.go index 2a7a6968d..3de3f5810 100644 --- a/src/common/rbac/project/util.go +++ b/src/common/rbac/project/util.go @@ -54,6 +54,7 @@ var ( {Resource: rbac.ResourceSelf, Action: rbac.ActionDelete}, {Resource: rbac.ResourceMember, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceMember, Action: rbac.ActionRead}, {Resource: rbac.ResourceMember, Action: rbac.ActionUpdate}, {Resource: rbac.ResourceMember, Action: rbac.ActionDelete}, {Resource: rbac.ResourceMember, Action: rbac.ActionList}, @@ -87,6 +88,13 @@ var ( {Resource: rbac.ResourceReplicationTask, Action: rbac.ActionUpdate}, {Resource: rbac.ResourceReplicationTask, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionRead}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate}, + {Resource: rbac.ResourceLabel, Action: rbac.ActionCreate}, {Resource: rbac.ResourceLabel, Action: rbac.ActionRead}, {Resource: rbac.ResourceLabel, Action: rbac.ActionUpdate}, @@ -143,6 +151,12 @@ var ( {Resource: rbac.ResourceRobot, Action: rbac.ActionUpdate}, {Resource: rbac.ResourceRobot, Action: rbac.ActionDelete}, {Resource: rbac.ResourceRobot, Action: 
rbac.ActionList}, + + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead}, } ) diff --git a/src/common/rbac/project/visitor_role.go b/src/common/rbac/project/visitor_role.go old mode 100644 new mode 100755 index 4287f97db..36202a602 --- a/src/common/rbac/project/visitor_role.go +++ b/src/common/rbac/project/visitor_role.go @@ -27,6 +27,7 @@ var ( {Resource: rbac.ResourceSelf, Action: rbac.ActionDelete}, {Resource: rbac.ResourceMember, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceMember, Action: rbac.ActionRead}, {Resource: rbac.ResourceMember, Action: rbac.ActionUpdate}, {Resource: rbac.ResourceMember, Action: rbac.ActionDelete}, {Resource: rbac.ResourceMember, Action: rbac.ActionList}, @@ -60,6 +61,13 @@ var ( {Resource: rbac.ResourceRepository, Action: rbac.ActionPull}, {Resource: rbac.ResourceRepository, Action: rbac.ActionPush}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionRead}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate}, + {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList}, @@ -100,11 +108,18 @@ var ( {Resource: rbac.ResourceRobot, Action: rbac.ActionUpdate}, {Resource: rbac.ResourceRobot, Action: rbac.ActionDelete}, {Resource: rbac.ResourceRobot, Action: rbac.ActionList}, + + {Resource: 
rbac.ResourceNotificationPolicy, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList}, + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead}, }, "master": { {Resource: rbac.ResourceSelf, Action: rbac.ActionRead}, + {Resource: rbac.ResourceMember, Action: rbac.ActionRead}, {Resource: rbac.ResourceMember, Action: rbac.ActionList}, {Resource: rbac.ResourceMetadata, Action: rbac.ActionCreate}, @@ -131,6 +146,13 @@ var ( {Resource: rbac.ResourceRepository, Action: rbac.ActionPush}, {Resource: rbac.ResourceRepository, Action: rbac.ActionPull}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionCreate}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionRead}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionUpdate}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionDelete}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList}, + {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate}, + {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete}, {Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList}, @@ -167,11 +189,14 @@ var ( {Resource: rbac.ResourceRobot, Action: rbac.ActionRead}, {Resource: rbac.ResourceRobot, Action: rbac.ActionList}, + + {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList}, }, "developer": { {Resource: rbac.ResourceSelf, Action: rbac.ActionRead}, + {Resource: rbac.ResourceMember, Action: rbac.ActionRead}, {Resource: rbac.ResourceMember, Action: rbac.ActionList}, {Resource: rbac.ResourceLog, Action: rbac.ActionList}, @@ -221,6 +246,7 @@ var ( "guest": { {Resource: rbac.ResourceSelf, Action: rbac.ActionRead}, + {Resource: rbac.ResourceMember, Action: rbac.ActionRead}, {Resource: 
rbac.ResourceMember, Action: rbac.ActionList}, {Resource: rbac.ResourceLog, Action: rbac.ActionList}, diff --git a/src/common/security/local/context.go b/src/common/security/local/context.go index 655fe34b1..907521e2f 100644 --- a/src/common/security/local/context.go +++ b/src/common/security/local/context.go @@ -17,7 +17,6 @@ package local import ( "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/common/rbac/project" @@ -128,10 +127,24 @@ func (s *SecurityContext) GetProjectRoles(projectIDOrName interface{}) []int { roles = append(roles, common.RoleGuest) } } - if len(roles) != 0 { - return roles + return mergeRoles(roles, s.GetRolesByGroup(projectIDOrName)) +} + +func mergeRoles(rolesA, rolesB []int) []int { + type void struct{} + var roles []int + var placeHolder void + roleSet := make(map[int]void) + for _, r := range rolesA { + roleSet[r] = placeHolder } - return s.GetRolesByGroup(projectIDOrName) + for _, r := range rolesB { + roleSet[r] = placeHolder + } + for r := range roleSet { + roles = append(roles, r) + } + return roles } // GetRolesByGroup - Get the group role of current user to the project @@ -140,12 +153,11 @@ func (s *SecurityContext) GetRolesByGroup(projectIDOrName interface{}) []int { user := s.user project, err := s.pm.Get(projectIDOrName) // No user, group or project info - if err != nil || project == nil || user == nil || len(user.GroupList) == 0 { + if err != nil || project == nil || user == nil || len(user.GroupIDs) == 0 { return roles } - // Get role by LDAP group - groupDNConditions := group.GetGroupDNQueryCondition(user.GroupList) - roles, err = dao.GetRolesByLDAPGroup(project.ProjectID, groupDNConditions) + // Get role by Group ID + roles, err = dao.GetRolesByGroupID(project.ProjectID, user.GroupIDs) if err != nil { return nil } 
@@ -157,8 +169,8 @@ func (s *SecurityContext) GetMyProjects() ([]*models.Project, error) { result, err := s.pm.List( &models.ProjectQueryParam{ Member: &models.MemberQuery{ - Name: s.GetUsername(), - GroupList: s.user.GroupList, + Name: s.GetUsername(), + GroupIDs: s.user.GroupIDs, }, }) if err != nil { diff --git a/src/common/security/local/context_test.go b/src/common/security/local/context_test.go index 955c041cc..ffbb51885 100644 --- a/src/common/security/local/context_test.go +++ b/src/common/security/local/context_test.go @@ -20,6 +20,7 @@ import ( "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/dao/project" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/rbac" @@ -253,9 +254,16 @@ func TestHasPushPullPermWithGroup(t *testing.T) { if err != nil { t.Errorf("Error occurred when GetUser: %v", err) } - developer.GroupList = []*models.UserGroup{ - {GroupName: "test_group", GroupType: 1, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"}, + + userGroups, err := group.QueryUserGroup(models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"}) + if err != nil { + t.Errorf("Failed to query user group %v", err) } + if len(userGroups) < 1 { + t.Errorf("Failed to retrieve user group") + } + + developer.GroupIDs = []int{userGroups[0].ID} resource := rbac.NewProjectNamespace(project.Name).Resource(rbac.ResourceRepository) @@ -332,9 +340,15 @@ func TestSecurityContext_GetRolesByGroup(t *testing.T) { if err != nil { t.Errorf("Error occurred when GetUser: %v", err) } - developer.GroupList = []*models.UserGroup{ - {GroupName: "test_group", GroupType: 1, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"}, + userGroups, err := group.QueryUserGroup(models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"}) + if err != nil { + t.Errorf("Failed 
to query user group %v", err) } + if len(userGroups) < 1 { + t.Errorf("Failed to retrieve user group") + } + + developer.GroupIDs = []int{userGroups[0].ID} type fields struct { user *models.User pm promgr.ProjectManager @@ -394,3 +408,27 @@ func TestSecurityContext_GetMyProjects(t *testing.T) { }) } } + +func Test_mergeRoles(t *testing.T) { + type args struct { + rolesA []int + rolesB []int + } + tests := []struct { + name string + args args + want []int + }{ + {"normal", args{[]int{3, 4}, []int{1, 2, 3, 4}}, []int{1, 2, 3, 4}}, + {"empty", args{[]int{}, []int{}}, []int{}}, + {"left empty", args{[]int{}, []int{1, 2, 3, 4}}, []int{1, 2, 3, 4}}, + {"right empty", args{[]int{1, 2, 3, 4}, []int{}}, []int{1, 2, 3, 4}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := mergeRoles(tt.args.rolesA, tt.args.rolesB); !test.CheckSetsEqual(got, tt.want) { + t.Errorf("mergeRoles() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/common/utils/clair/utils.go b/src/common/utils/clair/utils.go index 2eb986e75..48ba3711e 100644 --- a/src/common/utils/clair/utils.go +++ b/src/common/utils/clair/utils.go @@ -15,10 +15,7 @@ package clair import ( - "fmt" - "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/utils/log" "strings" ) @@ -41,26 +38,6 @@ func ParseClairSev(clairSev string) models.Severity { } } -// UpdateScanOverview qeuries the vulnerability based on the layerName and update the record in img_scan_overview table based on digest. 
-func UpdateScanOverview(digest, layerName string, clairEndpoint string, l ...*log.Logger) error { - var logger *log.Logger - if len(l) > 1 { - return fmt.Errorf("More than one logger specified") - } else if len(l) == 1 { - logger = l[0] - } else { - logger = log.DefaultLogger() - } - client := NewClient(clairEndpoint, logger) - res, err := client.GetResult(layerName) - if err != nil { - logger.Errorf("Failed to get result from Clair, error: %v", err) - return err - } - compOverview, sev := transformVuln(res) - return dao.UpdateImgScanOverview(digest, layerName, sev, compOverview) -} - func transformVuln(clairVuln *models.ClairLayerEnvelope) (*models.ComponentsOverview, models.Severity) { vulnMap := make(map[models.Severity]int) features := clairVuln.Layer.Features diff --git a/src/common/utils/ldap/ldap.go b/src/common/utils/ldap/ldap.go index e7c453376..512af7618 100644 --- a/src/common/utils/ldap/ldap.go +++ b/src/common/utils/ldap/ldap.go @@ -220,6 +220,27 @@ func (session *Session) SearchUser(username string) ([]models.LdapUser, error) { } u.GroupDNList = groupDNList } + + log.Debugf("Searching for nested groups") + nestedGroupDNList := []string{} + nestedGroupFilter := createNestedGroupFilter(ldapEntry.DN) + result, err := session.SearchLdap(nestedGroupFilter) + if err != nil { + return nil, err + } + + for _, groupEntry := range result.Entries { + if !contains(u.GroupDNList, groupEntry.DN) { + nestedGroupDNList = append(nestedGroupDNList, strings.TrimSpace(groupEntry.DN)) + log.Debugf("Found group %v", groupEntry.DN) + } else { + log.Debugf("%v is already in GroupDNList", groupEntry.DN) + } + } + + u.GroupDNList = append(u.GroupDNList, nestedGroupDNList...) 
+ log.Debugf("Done searching for nested groups") + u.DN = ldapEntry.DN ldapUsers = append(ldapUsers, u) @@ -330,13 +351,13 @@ func (session *Session) createUserFilter(username string) string { filterTag = goldap.EscapeFilter(username) } - ldapFilter := session.ldapConfig.LdapFilter + ldapFilter := normalizeFilter(session.ldapConfig.LdapFilter) ldapUID := session.ldapConfig.LdapUID if ldapFilter == "" { ldapFilter = "(" + ldapUID + "=" + filterTag + ")" } else { - ldapFilter = "(&" + ldapFilter + "(" + ldapUID + "=" + filterTag + "))" + ldapFilter = "(&(" + ldapFilter + ")(" + ldapUID + "=" + filterTag + "))" } log.Debug("ldap filter :", ldapFilter) @@ -404,6 +425,7 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st filter := "" groupName = goldap.EscapeFilter(groupName) groupNameAttribute = goldap.EscapeFilter(groupNameAttribute) + oldFilter = normalizeFilter(oldFilter) if len(oldFilter) == 0 { if len(groupName) == 0 { filter = groupNameAttribute + "=*" @@ -419,3 +441,26 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st } return filter } + +func createNestedGroupFilter(userDN string) string { + filter := "" + filter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + userDN + "))" + return filter +} + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +// normalizeFilter - remove '(' and ')' in ldap filter +func normalizeFilter(filter string) string { + norFilter := strings.TrimSpace(filter) + norFilter = strings.TrimPrefix(norFilter, "(") + norFilter = strings.TrimSuffix(norFilter, ")") + return norFilter +} diff --git a/src/common/utils/ldap/ldap_test.go b/src/common/utils/ldap/ldap_test.go index ed80fd17a..e7b3344a6 100644 --- a/src/common/utils/ldap/ldap_test.go +++ b/src/common/utils/ldap/ldap_test.go @@ -369,3 +369,25 @@ func TestSession_SearchGroupByDN(t *testing.T) { }) } } + +func TestNormalizeFilter(t 
*testing.T) { + type args struct { + filter string + } + tests := []struct { + name string + args args + want string + }{ + {"normal test", args{"(objectclass=user)"}, "objectclass=user"}, + {"with space", args{" (objectclass=user) "}, "objectclass=user"}, + {"nothing", args{"objectclass=user"}, "objectclass=user"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := normalizeFilter(tt.args.filter); got != tt.want { + t.Errorf("normalizeFilter() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/common/utils/log/logger.go b/src/common/utils/log/logger.go index 882f8a716..5c8b6b376 100644 --- a/src/common/utils/log/logger.go +++ b/src/common/utils/log/logger.go @@ -278,7 +278,7 @@ func line(callDepth int) string { line = 0 } l := strings.SplitN(file, srcSeparator, 2) - if len(l) > 0 { + if len(l) > 1 { file = l[1] } return fmt.Sprintf("[%s:%d]:", file, line) diff --git a/src/common/utils/notary/helper.go b/src/common/utils/notary/helper.go index 76bd2ac0f..db80a9450 100644 --- a/src/common/utils/notary/helper.go +++ b/src/common/utils/notary/helper.go @@ -22,6 +22,8 @@ import ( "path" "strings" + "github.com/goharbor/harbor/src/common/utils/notary/model" + "github.com/docker/distribution/registry/auth/token" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/common/utils/registry" @@ -41,14 +43,6 @@ var ( mockRetriever notary.PassRetriever ) -// Target represents the json object of a target of a docker image in notary. -// The struct will be used when repository is know so it won'g contain the name of a repository. -type Target struct { - Tag string `json:"tag"` - Hashes data.Hashes `json:"hashes"` - // TODO: update fields as needed. 
-} - func init() { mockRetriever = func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) { passphrase = "hardcode" @@ -60,7 +54,7 @@ func init() { } // GetInternalTargets wraps GetTargets to read config values for getting full-qualified repo from internal notary instance. -func GetInternalTargets(notaryEndpoint string, username string, repo string) ([]Target, error) { +func GetInternalTargets(notaryEndpoint string, username string, repo string) ([]model.Target, error) { ext, err := config.ExtEndpoint() if err != nil { log.Errorf("Error while reading external endpoint: %v", err) @@ -74,8 +68,8 @@ func GetInternalTargets(notaryEndpoint string, username string, repo string) ([] // GetTargets is a help function called by API to fetch signature information of a given repository. // Per docker's convention the repository should contain the information of endpoint, i.e. it should look // like "192.168.0.1/library/ubuntu", instead of "library/ubuntu" (fqRepo for fully-qualified repo) -func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target, error) { - res := []Target{} +func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]model.Target, error) { + res := []model.Target{} t, err := tokenutil.MakeToken(username, tokenutil.Notary, []*token.ResourceActions{ { @@ -109,13 +103,16 @@ func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target log.Warningf("Failed to clear cached root.json: %s, error: %v, when repo is removed from notary the signature status maybe incorrect", rootJSON, rmErr) } for _, t := range targets { - res = append(res, Target{t.Name, t.Hashes}) + res = append(res, model.Target{ + Tag: t.Name, + Hashes: t.Hashes, + }) } return res, nil } // DigestFromTarget get a target and return the value of digest, in accordance to Docker-Content-Digest -func DigestFromTarget(t Target) (string, error) { +func DigestFromTarget(t model.Target) (string, error) { 
sha, ok := t.Hashes["sha256"] if !ok { return "", fmt.Errorf("no valid hash, expecting sha256") diff --git a/src/common/utils/notary/helper_test.go b/src/common/utils/notary/helper_test.go index d3c11e63b..a0c2a1f34 100644 --- a/src/common/utils/notary/helper_test.go +++ b/src/common/utils/notary/helper_test.go @@ -17,6 +17,8 @@ import ( "encoding/json" "fmt" + "github.com/goharbor/harbor/src/common/utils/notary/model" + notarytest "github.com/goharbor/harbor/src/common/utils/notary/test" "github.com/goharbor/harbor/src/common/utils/test" "github.com/goharbor/harbor/src/core/config" @@ -81,17 +83,19 @@ func TestGetDigestFromTarget(t *testing.T) { } }` - var t1 Target + var t1 model.Target err := json.Unmarshal([]byte(str), &t1) if err != nil { panic(err) } hash2 := make(map[string][]byte) - t2 := Target{"2.0", hash2} + t2 := model.Target{ + Tag: "2.0", + Hashes: hash2, + } d1, err1 := DigestFromTarget(t1) assert.Nil(t, err1, "Unexpected error: %v", err1) assert.Equal(t, "sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7", d1, "digest mismatch") _, err2 := DigestFromTarget(t2) assert.NotNil(t, err2, "") - } diff --git a/src/common/utils/notary/model/model.go b/src/common/utils/notary/model/model.go new file mode 100644 index 000000000..ef83ef60c --- /dev/null +++ b/src/common/utils/notary/model/model.go @@ -0,0 +1,25 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import "github.com/theupdateframework/notary/tuf/data" + +// Target represents the json object of a target of a docker image in notary. +// The struct will be used when repository is know so it won'g contain the name of a repository. +type Target struct { + Tag string `json:"tag"` + Hashes data.Hashes `json:"hashes"` + // TODO: update fields as needed. +} diff --git a/src/common/utils/oidc/helper.go b/src/common/utils/oidc/helper.go index 32fee3c29..30f14e209 100644 --- a/src/common/utils/oidc/helper.go +++ b/src/common/utils/oidc/helper.go @@ -35,20 +35,14 @@ const googleEndpoint = "https://accounts.google.com" type providerHelper struct { sync.Mutex - ep endpoint - instance atomic.Value - setting atomic.Value -} - -type endpoint struct { - url string - VerifyCert bool + instance atomic.Value + setting atomic.Value + creationTime time.Time } func (p *providerHelper) get() (*gooidc.Provider, error) { if p.instance.Load() != nil { - s := p.setting.Load().(models.OIDCSetting) - if s.Endpoint != p.ep.url || s.VerifyCert != p.ep.VerifyCert { // relevant settings have changed, need to re-create provider. 
+ if time.Now().Sub(p.creationTime) > 3*time.Second { if err := p.create(); err != nil { return nil, err } @@ -57,7 +51,7 @@ func (p *providerHelper) get() (*gooidc.Provider, error) { p.Lock() defer p.Unlock() if p.instance.Load() == nil { - if err := p.reload(); err != nil { + if err := p.reloadSetting(); err != nil { return nil, err } if err := p.create(); err != nil { @@ -65,7 +59,7 @@ func (p *providerHelper) get() (*gooidc.Provider, error) { } go func() { for { - if err := p.reload(); err != nil { + if err := p.reloadSetting(); err != nil { log.Warningf("Failed to refresh configuration, error: %v", err) } time.Sleep(3 * time.Second) @@ -73,10 +67,11 @@ func (p *providerHelper) get() (*gooidc.Provider, error) { }() } } + return p.instance.Load().(*gooidc.Provider), nil } -func (p *providerHelper) reload() error { +func (p *providerHelper) reloadSetting() error { conf, err := config.OIDCSetting() if err != nil { return fmt.Errorf("failed to load OIDC setting: %v", err) @@ -96,10 +91,7 @@ func (p *providerHelper) create() error { return fmt.Errorf("failed to create OIDC provider, error: %v", err) } p.instance.Store(provider) - p.ep = endpoint{ - url: s.Endpoint, - VerifyCert: s.VerifyCert, - } + p.creationTime = time.Now() return nil } @@ -214,3 +206,19 @@ func RefreshToken(ctx context.Context, token *Token) (*Token, error) { } return &Token{Token: *t, IDToken: it}, nil } + +// Conn wraps connection info of an OIDC endpoint +type Conn struct { + URL string `json:"url"` + VerifyCert bool `json:"verify_cert"` +} + +// TestEndpoint tests whether the endpoint is a valid OIDC endpoint. 
+// The nil return value indicates the success of the test +func TestEndpoint(conn Conn) error { + + // gooidc will try to call the discovery api when creating the provider and that's all we need to check + ctx := clientCtx(context.Background(), conn.VerifyCert) + _, err := gooidc.NewProvider(ctx, conn.URL) + return err +} diff --git a/src/common/utils/oidc/helper_test.go b/src/common/utils/oidc/helper_test.go index e1e71a8b9..d706836b8 100644 --- a/src/common/utils/oidc/helper_test.go +++ b/src/common/utils/oidc/helper_test.go @@ -49,21 +49,20 @@ func TestMain(m *testing.M) { func TestHelperLoadConf(t *testing.T) { testP := &providerHelper{} assert.Nil(t, testP.setting.Load()) - err := testP.reload() + err := testP.reloadSetting() assert.Nil(t, err) assert.Equal(t, "test", testP.setting.Load().(models.OIDCSetting).Name) - assert.Equal(t, endpoint{}, testP.ep) } func TestHelperCreate(t *testing.T) { testP := &providerHelper{} - err := testP.reload() + err := testP.reloadSetting() assert.Nil(t, err) assert.Nil(t, testP.instance.Load()) err = testP.create() assert.Nil(t, err) - assert.EqualValues(t, "https://accounts.google.com", testP.ep.url) assert.NotNil(t, testP.instance.Load()) + assert.True(t, time.Now().Sub(testP.creationTime) < 2*time.Second) } func TestHelperGet(t *testing.T) { @@ -98,3 +97,16 @@ func TestAuthCodeURL(t *testing.T) { assert.Equal(t, "offline", q.Get("access_type")) assert.False(t, strings.Contains(q.Get("scope"), "offline_access")) } + +func TestTestEndpoint(t *testing.T) { + c1 := Conn{ + URL: googleEndpoint, + VerifyCert: true, + } + c2 := Conn{ + URL: "https://www.baidu.com", + VerifyCert: false, + } + assert.Nil(t, TestEndpoint(c1)) + assert.NotNil(t, TestEndpoint(c2)) +} diff --git a/src/common/utils/passports.go b/src/common/utils/passports.go new file mode 100644 index 000000000..bce88be9e --- /dev/null +++ b/src/common/utils/passports.go @@ -0,0 +1,128 @@ +package utils + +import ( + "context" + "sync" + + 
"github.com/goharbor/harbor/src/common/utils/log" +) + +// PassportsPool holds a given number of passports, they can be applied or be revoked. PassportsPool +// is used to control the concurrency of tasks, the pool size determine the max concurrency. When users +// want to start a goroutine to perform some task, they must apply a passport firstly, and after finish +// the task, the passport must be revoked. +type PassportsPool interface { + // Apply applies a passport from the pool. + Apply() bool + // Revoke revokes a passport to the pool + Revoke() bool +} + +type passportsPool struct { + passports chan struct{} + stopped <-chan struct{} +} + +// NewPassportsPool creates a passports pool with given size +func NewPassportsPool(size int, stopped <-chan struct{}) PassportsPool { + return &passportsPool{ + passports: make(chan struct{}, size), + stopped: stopped, + } +} + +// Apply applies a passport from the pool. Returning value 'true' means passport acquired +// successfully. If no available passports in the pool, 'Apply' will wait for it. If the +// all passports in the pool are turned into invalid by the 'stopped' channel, then false +// is returned, means no more passports will be dispatched. +func (p *passportsPool) Apply() bool { + select { + case p.passports <- struct{}{}: + return true + case <-p.stopped: + return false + } +} + +// Revoke revokes a passport to the pool. Returning value 'true' means passport revoked +// successfully, otherwise 'Revoke' will wait. If pool turns into invalid by 'stopped' channel +// false will be returned. +func (p *passportsPool) Revoke() bool { + select { + case <-p.passports: + return true + case <-p.stopped: + return false + } +} + +// LimitedConcurrentRunner is used to run tasks, but limit the max concurrency. 
+type LimitedConcurrentRunner interface { + // AddTask adds a task to run + AddTask(task func() error) + // Wait waits all the tasks to be finished + Wait() + // Cancel cancels all tasks, tasks that already started will continue to run + Cancel() + // IsCancelled checks whether context is cancelled. This happens when some task encountered + // critical errors. + IsCancelled() bool +} + +type limitedConcurrentRunner struct { + wg *sync.WaitGroup + ctx context.Context + cancel context.CancelFunc + passportsPool PassportsPool +} + +// NewLimitedConcurrentRunner creates a runner +func NewLimitedConcurrentRunner(limit int) LimitedConcurrentRunner { + ctx, cancel := context.WithCancel(context.Background()) + return &limitedConcurrentRunner{ + wg: new(sync.WaitGroup), + ctx: ctx, + cancel: cancel, + passportsPool: NewPassportsPool(limit, ctx.Done()), + } +} + +// AddTask adds a task to run +func (r *limitedConcurrentRunner) AddTask(task func() error) { + r.wg.Add(1) + go func() { + defer func() { + r.wg.Done() + }() + + // Return false means no passport acquired, and no valid passport will be dispatched any more. + // For example, some crucial errors happened and all tasks should be cancelled. + if ok := r.passportsPool.Apply(); !ok { + return + } + defer func() { + r.passportsPool.Revoke() + }() + + err := task() + if err != nil { + log.Errorf("%v", err) + r.cancel() + } + }() +} + +// Wait waits all the tasks to be finished +func (r *limitedConcurrentRunner) Wait() { + r.wg.Wait() +} + +// Cancel cancels all tasks, tasks that already started will continue to run +func (r *limitedConcurrentRunner) Cancel() { + r.cancel() +} + +// IsCancelled checks whether context is cancelled. This happens when some task encountered critical errors. 
+func (r *limitedConcurrentRunner) IsCancelled() bool { + return r.ctx.Err() != nil +} diff --git a/src/common/utils/redis/helper.go b/src/common/utils/redis/helper.go new file mode 100644 index 000000000..5a137acdd --- /dev/null +++ b/src/common/utils/redis/helper.go @@ -0,0 +1,232 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "errors" + "fmt" + "os" + "strconv" + "sync" + "time" + + "github.com/garyburd/redigo/redis" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" +) + +var ( + // ErrUnLock ... + ErrUnLock = errors.New("error to release the redis lock") +) + +const ( + unlockScript = ` +if redis.call("get",KEYS[1]) == ARGV[1] then + return redis.call("del",KEYS[1]) +else + return 0 +end +` +) + +// Mutex ... +type Mutex struct { + Conn redis.Conn + key string + value string + opts Options +} + +// New ... 
+func New(conn redis.Conn, key, value string) *Mutex { + o := *DefaultOptions() + if value == "" { + value = utils.GenerateRandomString() + } + return &Mutex{conn, key, value, o} +} + +// Require retry to require the lock +func (rm *Mutex) Require() (bool, error) { + var isRequired bool + var err error + + for i := 0; i < rm.opts.maxRetry; i++ { + isRequired, err = rm.require() + if isRequired { + break + } + if err != nil || !isRequired { + time.Sleep(rm.opts.retryDelay) + } + } + + return isRequired, err +} + +// require get the redis lock, for details, just refer to https://redis.io/topics/distlock +func (rm *Mutex) require() (bool, error) { + reply, err := redis.String(rm.Conn.Do("SET", rm.key, rm.value, "NX", "PX", int(rm.opts.expiry/time.Millisecond))) + if err != nil { + return false, err + } + return reply == "OK", nil +} + +// Free releases the lock, for details, just refer to https://redis.io/topics/distlock +func (rm *Mutex) Free() (bool, error) { + script := redis.NewScript(1, unlockScript) + resp, err := redis.Int(script.Do(rm.Conn, rm.key, rm.value)) + if err != nil { + return false, err + } + if resp == 0 { + return false, ErrUnLock + } + return true, nil +} + +// Options ... +type Options struct { + retryDelay time.Duration + expiry time.Duration + maxRetry int +} + +var ( + opt *Options + optOnce sync.Once + + defaultDelay = int64(1) // 1 second + defaultMaxRetry = 600 + defaultExpire = int64(2 * time.Hour / time.Second) // 2 hours +) + +// DefaultOptions ... 
+func DefaultOptions() *Options { + optOnce.Do(func() { + retryDelay, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_RETRY_DELAY"), 10, 64) + if err != nil || retryDelay < 0 { + retryDelay = defaultDelay + } + + maxRetry, err := strconv.Atoi(os.Getenv("REDIS_LOCK_MAX_RETRY")) + if err != nil || maxRetry < 0 { + maxRetry = defaultMaxRetry + } + + expire, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_EXPIRE"), 10, 64) + if err != nil || expire < 0 { + expire = defaultExpire + } + + opt = &Options{ + retryDelay: time.Duration(retryDelay) * time.Second, + expiry: time.Duration(expire) * time.Second, + maxRetry: maxRetry, + } + }) + + return opt +} + +var ( + pool *redis.Pool + poolOnce sync.Once + + poolMaxIdle = 200 + poolMaxActive = 1000 + poolIdleTimeout int64 = 180 +) + +// DefaultPool return default redis pool +func DefaultPool() *redis.Pool { + poolOnce.Do(func() { + maxIdle, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_IDLE")) + if err != nil || maxIdle < 0 { + maxIdle = poolMaxIdle + } + + maxActive, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_ACTIVE")) + if err != nil || maxActive < 0 { + maxActive = poolMaxActive + } + + idleTimeout, err := strconv.ParseInt(os.Getenv("REDIS_POOL_IDLE_TIMEOUT"), 10, 64) + if err != nil || idleTimeout < 0 { + idleTimeout = poolIdleTimeout + } + + pool = &redis.Pool{ + Dial: func() (redis.Conn, error) { + url := config.GetRedisOfRegURL() + if url == "" { + url = "redis://localhost:6379/1" + } + + return redis.DialURL(url) + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + MaxIdle: maxIdle, + MaxActive: maxActive, + IdleTimeout: time.Duration(idleTimeout) * time.Second, + Wait: true, + } + }) + + return pool +} + +// RequireLock returns lock by key +func RequireLock(key string, conns ...redis.Conn) (*Mutex, error) { + var conn redis.Conn + if len(conns) > 0 { + conn = conns[0] + } else { + conn = DefaultPool().Get() + } + + m := New(conn, key, utils.GenerateRandomString()) 
+ ok, err := m.Require() + if err != nil { + return nil, fmt.Errorf("require redis lock failed: %v", err) + } + + if !ok { + return nil, fmt.Errorf("unable to require lock for %s", key) + } + + return m, nil +} + +// FreeLock free lock +func FreeLock(m *Mutex) error { + if _, err := m.Free(); err != nil { + log.Warningf("failed to free lock %s, error: %v", m.key, err) + return err + } + + if err := m.Conn.Close(); err != nil { + log.Warningf("failed to close the redis con for lock %s, error: %v", m.key, err) + return err + } + + return nil +} diff --git a/src/common/utils/redis/helper_test.go b/src/common/utils/redis/helper_test.go new file mode 100644 index 000000000..71572bc01 --- /dev/null +++ b/src/common/utils/redis/helper_test.go @@ -0,0 +1,102 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package redis + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/garyburd/redigo/redis" + "github.com/goharbor/harbor/src/common/utils" + "github.com/stretchr/testify/assert" +) + +const testingRedisHost = "REDIS_HOST" + +func init() { + os.Setenv("REDIS_LOCK_MAX_RETRY", "5") +} + +func TestRedisLock(t *testing.T) { + con, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379)) + assert.Nil(t, err) + defer con.Close() + + rm := New(con, "test-redis-lock", "test-value") + + successLock, err := rm.Require() + assert.Nil(t, err) + assert.True(t, successLock) + + time.Sleep(2 * time.Second) + _, err = rm.Require() + assert.NotNil(t, err) + + successUnLock, err := rm.Free() + assert.Nil(t, err) + assert.True(t, successUnLock) + +} + +func TestRequireLock(t *testing.T) { + assert := assert.New(t) + + conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379)) + assert.Nil(err) + defer conn.Close() + + if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) { + l.Free() + } + + if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) { + FreeLock(l) + } + + key := utils.GenerateRandomString() + if l, err := RequireLock(key); assert.Nil(err) { + defer FreeLock(l) + + _, err = RequireLock(key) + assert.Error(err) + } +} + +func TestFreeLock(t *testing.T) { + assert := assert.New(t) + + if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) { + assert.Nil(FreeLock(l)) + } + + conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379)) + assert.Nil(err) + + if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) { + conn.Close() + assert.Error(FreeLock(l)) + } +} + +func getRedisHost() string { + redisHost := os.Getenv(testingRedisHost) + if redisHost == "" { + redisHost = "127.0.0.1" // for local test + } + + return redisHost +} diff --git a/src/common/utils/registry/auth/tokenauthorizer.go b/src/common/utils/registry/auth/tokenauthorizer.go 
index cac7c1c4a..1f1c95569 100644 --- a/src/common/utils/registry/auth/tokenauthorizer.go +++ b/src/common/utils/registry/auth/tokenauthorizer.go @@ -15,6 +15,7 @@ package auth import ( + "errors" "fmt" "net/http" "net/url" @@ -111,7 +112,12 @@ func (t *tokenAuthorizer) Modify(req *http.Request) error { } } - req.Header.Add(http.CanonicalHeaderKey("Authorization"), fmt.Sprintf("Bearer %s", token.Token)) + tk := token.GetToken() + if len(tk) == 0 { + return errors.New("empty token content") + } + + req.Header.Add(http.CanonicalHeaderKey("Authorization"), fmt.Sprintf("Bearer %s", tk)) return nil } diff --git a/src/common/utils/registry/auth/util.go b/src/common/utils/registry/auth/util.go index ad86229d8..c3e8e217e 100644 --- a/src/common/utils/registry/auth/util.go +++ b/src/common/utils/registry/auth/util.go @@ -30,7 +30,7 @@ const ( service = "harbor-registry" ) -// GetToken requests a token against the endpoint using credetial provided +// GetToken requests a token against the endpoint using credential provided func GetToken(endpoint string, insecure bool, credential Credential, scopes []*token.ResourceActions) (*models.Token, error) { client := &http.Client{ diff --git a/src/common/utils/registry/registry.go b/src/common/utils/registry/registry.go index c835d4892..563c25ed2 100644 --- a/src/common/utils/registry/registry.go +++ b/src/common/utils/registry/registry.go @@ -22,8 +22,6 @@ import ( "net/url" "strings" - // "time" - commonhttp "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/utils" ) @@ -130,9 +128,18 @@ func (r *Registry) Catalog() ([]string, error) { return repos, nil } -// Ping ... 
+// Ping checks by Head method func (r *Registry) Ping() error { - req, err := http.NewRequest(http.MethodHead, buildPingURL(r.Endpoint.String()), nil) + return r.ping(http.MethodHead) +} + +// PingGet checks by Get method +func (r *Registry) PingGet() error { + return r.ping(http.MethodGet) +} + +func (r *Registry) ping(method string) error { + req, err := http.NewRequest(method, buildPingURL(r.Endpoint.String()), nil) if err != nil { return err } diff --git a/src/common/utils/registry/repository.go b/src/common/utils/registry/repository.go index 87f06dc43..7a4a1c6c7 100644 --- a/src/common/utils/registry/repository.go +++ b/src/common/utils/registry/repository.go @@ -25,11 +25,9 @@ import ( "sort" "strconv" "strings" - // "time" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" - commonhttp "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/utils" ) @@ -211,7 +209,7 @@ func (r *Repository) PushManifest(reference, mediaType string, payload []byte) ( defer resp.Body.Close() - if resp.StatusCode == http.StatusCreated { + if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusOK { digest = resp.Header.Get(http.CanonicalHeaderKey("Docker-Content-Digest")) return } @@ -407,6 +405,7 @@ func (r *Repository) monolithicBlobUpload(location, digest string, size int64, d if err != nil { return err } + req.ContentLength = size resp, err := r.client.Do(req) if err != nil { diff --git a/src/common/utils/test/test.go b/src/common/utils/test/test.go index 28046e5db..ad8048296 100644 --- a/src/common/utils/test/test.go +++ b/src/common/utils/test/test.go @@ -22,10 +22,11 @@ import ( "strings" "fmt" - "github.com/goharbor/harbor/src/common" - "github.com/gorilla/mux" "os" "sort" + + "github.com/goharbor/harbor/src/common" + "github.com/gorilla/mux" ) // RequestHandlerMapping is a mapping between request and its handler @@ -120,7 +121,7 @@ func GetUnitTestConfig() 
map[string]interface{} { common.LDAPGroupBaseDN: "dc=example,dc=com", common.LDAPGroupAttributeName: "cn", common.LDAPGroupSearchScope: 2, - common.LdapGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com", + common.LDAPGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com", common.WithNotary: "false", common.WithChartMuseum: "false", common.SelfRegistration: "true", @@ -141,3 +142,33 @@ func TraceCfgMap(cfgs map[string]interface{}) { fmt.Printf("%v=%v\n", k, cfgs[k]) } } + +// CheckSetsEqual - check int set if they are equals +func CheckSetsEqual(setA, setB []int) bool { + if len(setA) != len(setB) { + return false + } + type void struct{} + var exist void + setAll := make(map[int]void) + for _, r := range setA { + setAll[r] = exist + } + for _, r := range setB { + if _, ok := setAll[r]; !ok { + return false + } + } + + setAll = make(map[int]void) + for _, r := range setB { + setAll[r] = exist + } + for _, r := range setA { + if _, ok := setAll[r]; !ok { + return false + } + } + return true + +} diff --git a/src/common/utils/utils.go b/src/common/utils/utils.go index cea54d342..24a12258d 100644 --- a/src/common/utils/utils.go +++ b/src/common/utils/utils.go @@ -230,7 +230,14 @@ func GetStrValueOfAnyType(value interface{}) string { } strVal = string(b) } else { - strVal = fmt.Sprintf("%v", value) + switch val := value.(type) { + case float64: + strVal = strconv.FormatFloat(val, 'f', -1, 64) + case float32: + strVal = strconv.FormatFloat(float64(val), 'f', -1, 32) + default: + strVal = fmt.Sprintf("%v", value) + } } return strVal } @@ -255,3 +262,8 @@ func IsContainIllegalChar(s string, illegalChar []string) bool { } return false } + +// IsDigest A sha256 is a string with 64 characters. 
+func IsDigest(ref string) bool { + return strings.HasPrefix(ref, "sha256:") && len(ref) == 71 +} diff --git a/src/common/utils/utils_test.go b/src/common/utils/utils_test.go index 66c4bca0f..437f16152 100644 --- a/src/common/utils/utils_test.go +++ b/src/common/utils/utils_test.go @@ -381,3 +381,37 @@ func TestTrimLower(t *testing.T) { }) } } + +func TestGetStrValueOfAnyType(t *testing.T) { + type args struct { + value interface{} + } + tests := []struct { + name string + args args + want string + }{ + {"float", args{float32(1048576.1)}, "1048576.1"}, + {"float", args{float64(1048576.12)}, "1048576.12"}, + {"float", args{1048576.000}, "1048576"}, + {"int", args{1048576}, "1048576"}, + {"int", args{9223372036854775807}, "9223372036854775807"}, + {"string", args{"hello world"}, "hello world"}, + {"bool", args{true}, "true"}, + {"bool", args{false}, "false"}, + {"map", args{map[string]interface{}{"key1": "value1"}}, "{\"key1\":\"value1\"}"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetStrValueOfAnyType(tt.args.value); got != tt.want { + t.Errorf("GetStrValueOfAnyType() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIsDigest(t *testing.T) { + assert := assert.New(t) + assert.False(IsDigest("latest")) + assert.True(IsDigest("sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7")) +} diff --git a/src/core/api/api_test.go b/src/core/api/api_test.go index 8b9d3bfaf..f8e1ccdd0 100644 --- a/src/core/api/api_test.go +++ b/src/core/api/api_test.go @@ -207,6 +207,17 @@ func TestMain(m *testing.M) { if err := prepare(); err != nil { panic(err) } + dao.ExecuteBatchSQL([]string{ + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01_api', 1, 'cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com')", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('vsphere.local\\administrators', 2, '')", + }) + + defer dao.ExecuteBatchSQL([]string{ + "delete from 
harbor_label", + "delete from robot", + "delete from user_group", + "delete from project_member", + }) ret := m.Run() clean() diff --git a/src/core/api/base.go b/src/core/api/base.go index bea127d0b..7b4b4bade 100644 --- a/src/core/api/base.go +++ b/src/core/api/base.go @@ -15,9 +15,15 @@ package api import ( + "encoding/json" + "errors" + "fmt" + + "github.com/goharbor/harbor/src/pkg/retention" + "github.com/goharbor/harbor/src/pkg/scheduler" + "net/http" - "errors" "github.com/ghodss/yaml" "github.com/goharbor/harbor/src/common/api" "github.com/goharbor/harbor/src/common/security" @@ -25,12 +31,24 @@ import ( "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/filter" "github.com/goharbor/harbor/src/core/promgr" + "github.com/goharbor/harbor/src/pkg/project" + "github.com/goharbor/harbor/src/pkg/repository" ) const ( yamlFileContentType = "application/x-yaml" ) +// the managers/controllers used globally +var ( + projectMgr project.Manager + repositoryMgr repository.Manager + retentionScheduler scheduler.Scheduler + retentionMgr retention.Manager + retentionLauncher retention.Launcher + retentionController retention.APIController +) + // BaseController ... type BaseController struct { api.BaseAPI @@ -41,13 +59,6 @@ type BaseController struct { ProjectMgr promgr.ProjectManager } -const ( - // ReplicationJobType ... - ReplicationJobType = "replication" - // ScanJobType ... 
- ScanJobType = "scan" -) - // Prepare inits security context and project manager from request // context func (b *BaseController) Prepare() { @@ -85,12 +96,50 @@ func (b *BaseController) WriteYamlData(object interface{}) { w := b.Ctx.ResponseWriter w.Header().Set("Content-Type", yamlFileContentType) w.WriteHeader(http.StatusOK) - w.Write(yData) + _, _ = w.Write(yData) } // Init related objects/configurations for the API controllers func Init() error { registerHealthCheckers() + + // init chart controller + if err := initChartController(); err != nil { + return err + } + + // init project manager + initProjectManager() + + // init repository manager + initRepositoryManager() + + initRetentionScheduler() + + retentionMgr = retention.NewManager() + + retentionLauncher = retention.NewLauncher(projectMgr, repositoryMgr, retentionMgr) + + retentionController = retention.NewAPIController(retentionMgr, projectMgr, repositoryMgr, retentionScheduler, retentionLauncher) + + callbackFun := func(p interface{}) error { + str, ok := p.(string) + if !ok { + return fmt.Errorf("the type of param %v isn't string", p) + } + param := &retention.TriggerParam{} + if err := json.Unmarshal([]byte(str), param); err != nil { + return fmt.Errorf("failed to unmarshal the param: %v", err) + } + _, err := retentionController.TriggerRetentionExec(param.PolicyID, param.Trigger, false) + return err + } + err := scheduler.Register(retention.SchedulerCallback, callbackFun) + + return err +} + +func initChartController() error { // If chart repository is not enabled then directly return if !config.WithChartMuseum() { return nil @@ -102,6 +151,17 @@ func Init() error { } chartController = chartCtl - return nil } + +func initProjectManager() { + projectMgr = project.New() +} + +func initRepositoryManager() { + repositoryMgr = repository.New(projectMgr, chartController) +} + +func initRetentionScheduler() { + retentionScheduler = scheduler.GlobalScheduler +} diff --git a/src/core/api/chart_repository.go 
b/src/core/api/chart_repository.go old mode 100644 new mode 100755 index e44f65174..dd9be934f --- a/src/core/api/chart_repository.go +++ b/src/core/api/chart_repository.go @@ -12,13 +12,14 @@ import ( "net/url" "strings" - "github.com/goharbor/harbor/src/common" - "github.com/goharbor/harbor/src/core/label" - "github.com/goharbor/harbor/src/chartserver" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/rbac" hlog "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/label" + + "github.com/goharbor/harbor/src/core/middlewares" rep_event "github.com/goharbor/harbor/src/replication/event" "github.com/goharbor/harbor/src/replication/model" ) @@ -46,6 +47,11 @@ const ( // chartController is a singleton instance var chartController *chartserver.Controller +// GetChartController returns the chart controller +func GetChartController() *chartserver.Controller { + return chartController +} + // ChartRepositoryAPI provides related API handlers for the chart repository APIs type ChartRepositoryAPI struct { // The base controller to provide common utilities @@ -526,7 +532,7 @@ func initializeChartController() (*chartserver.Controller, error) { return nil, errors.New("Endpoint URL of chart storage server is malformed") } - controller, err := chartserver.NewController(url) + controller, err := chartserver.NewController(url, middlewares.New(middlewares.ChartMiddlewares).Create()) if err != nil { return nil, errors.New("Failed to initialize chart API controller") } diff --git a/src/core/api/email_test.go b/src/core/api/email_test.go index c38fbbb29..7fff60776 100644 --- a/src/core/api/email_test.go +++ b/src/core/api/email_test.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+// +build !darwin + package api import ( diff --git a/src/core/api/harborapi_test.go b/src/core/api/harborapi_test.go index a0ca5c8e4..b6ed840b2 100644 --- a/src/core/api/harborapi_test.go +++ b/src/core/api/harborapi_test.go @@ -35,12 +35,14 @@ import ( testutils "github.com/goharbor/harbor/src/common/utils/test" api_models "github.com/goharbor/harbor/src/core/api/models" apimodels "github.com/goharbor/harbor/src/core/api/models" + quota "github.com/goharbor/harbor/src/core/api/quota" _ "github.com/goharbor/harbor/src/core/auth/db" _ "github.com/goharbor/harbor/src/core/auth/ldap" "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/filter" + "github.com/goharbor/harbor/src/pkg/notification" "github.com/goharbor/harbor/src/replication/model" - "github.com/goharbor/harbor/tests/apitests/apilib" + "github.com/goharbor/harbor/src/testing/apitests/apilib" ) const ( @@ -103,6 +105,7 @@ func init() { beego.Router("/api/users/:id/permissions", &UserAPI{}, "get:ListUserPermissions") beego.Router("/api/users/:id/sysadmin", &UserAPI{}, "put:ToggleUserAdminRole") beego.Router("/api/projects/:id([0-9]+)/logs", &ProjectAPI{}, "get:Logs") + beego.Router("/api/projects/:id([0-9]+)/summary", &ProjectAPI{}, "get:Summary") beego.Router("/api/projects/:id([0-9]+)/_deletable", &ProjectAPI{}, "get:Deletable") beego.Router("/api/projects/:id([0-9]+)/metadatas/?:name", &MetadataAPI{}, "get:Get") beego.Router("/api/projects/:id([0-9]+)/metadatas/", &MetadataAPI{}, "post:Post") @@ -144,6 +147,8 @@ func init() { beego.Router("/api/system/gc/:id([0-9]+)/log", &GCAPI{}, "get:GetLog") beego.Router("/api/system/gc/schedule", &GCAPI{}, "get:Get;put:Put;post:Post") beego.Router("/api/system/scanAll/schedule", &ScanAllAPI{}, "get:Get;put:Put;post:Post") + beego.Router("/api/system/CVEWhitelist", &SysCVEWhitelistAPI{}, "get:Get;put:Put") + beego.Router("/api/system/oidc/ping", &OIDCAPI{}, "post:Ping") beego.Router("/api/projects/:pid([0-9]+)/robots/", &RobotAPI{}, 
"post:Post;get:List") beego.Router("/api/projects/:pid([0-9]+)/robots/:id([0-9]+)", &RobotAPI{}, "get:Get;put:Put;delete:Delete") @@ -157,6 +162,22 @@ func init() { beego.Router("/api/replication/policies", &ReplicationPolicyAPI{}, "get:List;post:Create") beego.Router("/api/replication/policies/:id([0-9]+)", &ReplicationPolicyAPI{}, "get:Get;put:Update;delete:Delete") + beego.Router("/api/retentions/metadatas", &RetentionAPI{}, "get:GetMetadatas") + beego.Router("/api/retentions/:id", &RetentionAPI{}, "get:GetRetention") + beego.Router("/api/retentions", &RetentionAPI{}, "post:CreateRetention") + beego.Router("/api/retentions/:id", &RetentionAPI{}, "put:UpdateRetention") + beego.Router("/api/retentions/:id/executions", &RetentionAPI{}, "post:TriggerRetentionExec") + beego.Router("/api/retentions/:id/executions/:eid", &RetentionAPI{}, "patch:OperateRetentionExec") + beego.Router("/api/retentions/:id/executions", &RetentionAPI{}, "get:ListRetentionExecs") + beego.Router("/api/retentions/:id/executions/:eid/tasks", &RetentionAPI{}, "get:ListRetentionExecTasks") + beego.Router("/api/retentions/:id/executions/:eid/tasks/:tid", &RetentionAPI{}, "get:GetRetentionExecTaskLog") + + beego.Router("/api/projects/:pid([0-9]+)/webhook/policies", &NotificationPolicyAPI{}, "get:List;post:Post") + beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/:id([0-9]+)", &NotificationPolicyAPI{}) + beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/test", &NotificationPolicyAPI{}, "post:Test") + beego.Router("/api/projects/:pid([0-9]+)/webhook/lasttrigger", &NotificationPolicyAPI{}, "get:ListGroupByEventType") + beego.Router("/api/projects/:pid([0-9]+)/webhook/jobs/", &NotificationJobAPI{}, "get:List") + // Charts are controlled under projects chartRepositoryAPIType := &ChartRepositoryAPI{} beego.Router("/api/chartrepo/health", chartRepositoryAPIType, "get:GetHealthStatus") @@ -178,16 +199,30 @@ func init() { beego.Router("/api/chartrepo/:repo/charts/:name/:version/labels", 
chartLabelAPIType, "get:GetLabels;post:MarkLabel") beego.Router("/api/chartrepo/:repo/charts/:name/:version/labels/:id([0-9]+)", chartLabelAPIType, "delete:RemoveLabel") + quotaAPIType := &QuotaAPI{} + beego.Router("/api/quotas", quotaAPIType, "get:List") + beego.Router("/api/quotas/:id([0-9]+)", quotaAPIType, "get:Get;put:Put") + + beego.Router("/api/internal/switchquota", &InternalAPI{}, "put:SwitchQuota") + beego.Router("/api/internal/syncquota", &InternalAPI{}, "post:SyncQuota") + // syncRegistry if err := SyncRegistry(config.GlobalProjectMgr); err != nil { log.Fatalf("failed to sync repositories from registry: %v", err) } + if err := quota.Sync(config.GlobalProjectMgr, false); err != nil { + log.Fatalf("failed to sync quota from backend: %v", err) + } + // Init user Info admin = &usrInfo{adminName, adminPwd} unknownUsr = &usrInfo{"unknown", "unknown"} testUser = &usrInfo{TestUserName, TestUserPwd} + // Init notification related check map + notification.Init() + // Init mock jobservice mockServer := test.NewJobServiceServer() defer mockServer.Close() @@ -452,6 +487,23 @@ func (a testapi) ProjectDeletable(prjUsr usrInfo, projectID int64) (int, bool, e return code, deletable.Deletable, nil } +// ProjectSummary returns summary for the project +func (a testapi) ProjectSummary(prjUsr usrInfo, projectID string) (int, apilib.ProjectSummary, error) { + _sling := sling.New().Get(a.basePath) + + // create api path + path := "api/projects/" + projectID + "/summary" + _sling = _sling.Path(path) + + var successPayload apilib.ProjectSummary + + httpStatusCode, body, err := request(_sling, jsonAcceptHeader, prjUsr) + if err == nil && httpStatusCode == 200 { + err = json.Unmarshal(body, &successPayload) + } + return httpStatusCode, successPayload, err +} + // -------------------------Member Test---------------------------------------// // Return relevant role members of projectID @@ -554,7 +606,7 @@ func (a testapi) GetRepos(authInfo usrInfo, projectID, keyword string) ( 
return code, nil, nil } -func (a testapi) GetTag(authInfo usrInfo, repository string, tag string) (int, *tagResp, error) { +func (a testapi) GetTag(authInfo usrInfo, repository string, tag string) (int, *models.TagResp, error) { _sling := sling.New().Get(a.basePath).Path(fmt.Sprintf("/api/repositories/%s/tags/%s", repository, tag)) code, data, err := request(_sling, jsonAcceptHeader, authInfo) if err != nil { @@ -566,7 +618,7 @@ func (a testapi) GetTag(authInfo usrInfo, repository string, tag string) (int, * return code, nil, nil } - result := tagResp{} + result := models.TagResp{} if err := json.Unmarshal(data, &result); err != nil { return 0, nil, err } @@ -590,7 +642,7 @@ func (a testapi) GetReposTags(authInfo usrInfo, repoName string) (int, interface return httpStatusCode, body, nil } - result := []tagResp{} + result := []models.TagResp{} if err := json.Unmarshal(body, &result); err != nil { return 0, nil, err } @@ -1211,3 +1263,55 @@ func (a testapi) RegistryUpdate(authInfo usrInfo, registryID int64, req *apimode return code, nil } + +// QuotasGet returns quotas +func (a testapi) QuotasGet(query *apilib.QuotaQuery, authInfo ...usrInfo) (int, []apilib.Quota, error) { + _sling := sling.New().Get(a.basePath). + Path("api/quotas"). 
+ QueryStruct(query) + + var successPayload []apilib.Quota + + var httpStatusCode int + var err error + var body []byte + if len(authInfo) > 0 { + httpStatusCode, body, err = request(_sling, jsonAcceptHeader, authInfo[0]) + } else { + httpStatusCode, body, err = request(_sling, jsonAcceptHeader) + } + + if err == nil && httpStatusCode == 200 { + err = json.Unmarshal(body, &successPayload) + } else { + log.Println(string(body)) + } + + return httpStatusCode, successPayload, err +} + +// Return specific quota +func (a testapi) QuotasGetByID(authInfo usrInfo, quotaID string) (int, apilib.Quota, error) { + _sling := sling.New().Get(a.basePath) + + // create api path + path := "api/quotas/" + quotaID + _sling = _sling.Path(path) + + var successPayload apilib.Quota + + httpStatusCode, body, err := request(_sling, jsonAcceptHeader, authInfo) + if err == nil && httpStatusCode == 200 { + err = json.Unmarshal(body, &successPayload) + } + return httpStatusCode, successPayload, err +} + +// Update spec for the quota +func (a testapi) QuotasPut(authInfo usrInfo, quotaID string, req models.QuotaUpdateRequest) (int, error) { + path := "/api/quotas/" + quotaID + _sling := sling.New().Put(a.basePath).Path(path).BodyJSON(req) + + httpStatusCode, _, err := request(_sling, jsonAcceptHeader, authInfo) + return httpStatusCode, err +} diff --git a/src/core/api/health.go b/src/core/api/health.go index 1a43ab68e..0d4ef2cac 100644 --- a/src/core/api/health.go +++ b/src/core/api/health.go @@ -34,8 +34,9 @@ import ( ) var ( - timeout = 60 * time.Second - healthCheckerRegistry = map[string]health.Checker{} + timeout = 60 * time.Second + // HealthCheckerRegistry ... 
+ HealthCheckerRegistry = map[string]health.Checker{} ) type overallHealthStatus struct { @@ -67,11 +68,11 @@ type HealthAPI struct { func (h *HealthAPI) CheckHealth() { var isHealthy healthy = true components := []*componentHealthStatus{} - c := make(chan *componentHealthStatus, len(healthCheckerRegistry)) - for name, checker := range healthCheckerRegistry { + c := make(chan *componentHealthStatus, len(HealthCheckerRegistry)) + for name, checker := range HealthCheckerRegistry { go check(name, checker, timeout, c) } - for i := 0; i < len(healthCheckerRegistry); i++ { + for i := 0; i < len(HealthCheckerRegistry); i++ { componentStatus := <-c if len(componentStatus.Error) != 0 { isHealthy = false @@ -212,10 +213,10 @@ func jobserviceHealthChecker() health.Checker { } func registryHealthChecker() health.Checker { - url := getRegistryURL() + "/v2" + url := getRegistryURL() + "/" timeout := 60 * time.Second period := 10 * time.Second - checker := HTTPStatusCodeHealthChecker(http.MethodGet, url, nil, timeout, http.StatusUnauthorized) + checker := HTTPStatusCodeHealthChecker(http.MethodGet, url, nil, timeout, http.StatusOK) return PeriodicHealthChecker(checker, period) } @@ -290,21 +291,21 @@ func redisHealthChecker() health.Checker { } func registerHealthCheckers() { - healthCheckerRegistry["core"] = coreHealthChecker() - healthCheckerRegistry["portal"] = portalHealthChecker() - healthCheckerRegistry["jobservice"] = jobserviceHealthChecker() - healthCheckerRegistry["registry"] = registryHealthChecker() - healthCheckerRegistry["registryctl"] = registryCtlHealthChecker() - healthCheckerRegistry["database"] = databaseHealthChecker() - healthCheckerRegistry["redis"] = redisHealthChecker() + HealthCheckerRegistry["core"] = coreHealthChecker() + HealthCheckerRegistry["portal"] = portalHealthChecker() + HealthCheckerRegistry["jobservice"] = jobserviceHealthChecker() + HealthCheckerRegistry["registry"] = registryHealthChecker() + HealthCheckerRegistry["registryctl"] = 
registryCtlHealthChecker() + HealthCheckerRegistry["database"] = databaseHealthChecker() + HealthCheckerRegistry["redis"] = redisHealthChecker() if config.WithChartMuseum() { - healthCheckerRegistry["chartmuseum"] = chartmuseumHealthChecker() + HealthCheckerRegistry["chartmuseum"] = chartmuseumHealthChecker() } if config.WithClair() { - healthCheckerRegistry["clair"] = clairHealthChecker() + HealthCheckerRegistry["clair"] = clairHealthChecker() } if config.WithNotary() { - healthCheckerRegistry["notary"] = notaryHealthChecker() + HealthCheckerRegistry["notary"] = notaryHealthChecker() } } diff --git a/src/core/api/health_test.go b/src/core/api/health_test.go index 8426a74b1..c98d021b5 100644 --- a/src/core/api/health_test.go +++ b/src/core/api/health_test.go @@ -92,9 +92,9 @@ func fakeHealthChecker(healthy bool) health.Checker { } func TestCheckHealth(t *testing.T) { // component01: healthy, component02: healthy => status: healthy - healthCheckerRegistry = map[string]health.Checker{} - healthCheckerRegistry["component01"] = fakeHealthChecker(true) - healthCheckerRegistry["component02"] = fakeHealthChecker(true) + HealthCheckerRegistry = map[string]health.Checker{} + HealthCheckerRegistry["component01"] = fakeHealthChecker(true) + HealthCheckerRegistry["component02"] = fakeHealthChecker(true) status := map[string]interface{}{} err := handleAndParse(&testingRequest{ method: http.MethodGet, @@ -104,9 +104,9 @@ func TestCheckHealth(t *testing.T) { assert.Equal(t, "healthy", status["status"].(string)) // component01: healthy, component02: unhealthy => status: unhealthy - healthCheckerRegistry = map[string]health.Checker{} - healthCheckerRegistry["component01"] = fakeHealthChecker(true) - healthCheckerRegistry["component02"] = fakeHealthChecker(false) + HealthCheckerRegistry = map[string]health.Checker{} + HealthCheckerRegistry["component01"] = fakeHealthChecker(true) + HealthCheckerRegistry["component02"] = fakeHealthChecker(false) status = map[string]interface{}{} err 
= handleAndParse(&testingRequest{ method: http.MethodGet, @@ -128,7 +128,7 @@ func TestDatabaseHealthChecker(t *testing.T) { } func TestRegisterHealthCheckers(t *testing.T) { - healthCheckerRegistry = map[string]health.Checker{} + HealthCheckerRegistry = map[string]health.Checker{} registerHealthCheckers() - assert.NotNil(t, healthCheckerRegistry["core"]) + assert.NotNil(t, HealthCheckerRegistry["core"]) } diff --git a/src/core/api/internal.go b/src/core/api/internal.go index 71f1f317e..06e6c45a2 100644 --- a/src/core/api/internal.go +++ b/src/core/api/internal.go @@ -15,12 +15,21 @@ package api import ( - "errors" - + "fmt" "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" + common_quota "github.com/goharbor/harbor/src/common/quota" "github.com/goharbor/harbor/src/common/utils/log" + + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/pkg/errors" + "strconv" + + quota "github.com/goharbor/harbor/src/core/api/quota" + + comcfg "github.com/goharbor/harbor/src/common/config" ) // InternalAPI handles request of harbor admin... @@ -69,3 +78,103 @@ func (ia *InternalAPI) RenameAdmin() { log.Debugf("The super user has been renamed to: %s", newName) ia.DestroySession() } + +// QuotaSwitcher ... +type QuotaSwitcher struct { + Enabled bool +} + +// SwitchQuota ... +func (ia *InternalAPI) SwitchQuota() { + var req QuotaSwitcher + if err := ia.DecodeJSONReq(&req); err != nil { + ia.SendBadRequestError(err) + return + } + // quota per project from disable to enable, it needs to update the quota usage bases on the DB records. 
+ if !config.QuotaPerProjectEnable() && req.Enabled { + if err := ia.ensureQuota(); err != nil { + ia.SendInternalServerError(err) + return + } + } + defer func() { + config.GetCfgManager().Set(common.QuotaPerProjectEnable, req.Enabled) + config.GetCfgManager().Save() + }() + return +} + +func (ia *InternalAPI) ensureQuota() error { + projects, err := dao.GetProjects(nil) + if err != nil { + return err + } + for _, project := range projects { + pSize, err := dao.CountSizeOfProject(project.ProjectID) + if err != nil { + logger.Warningf("error happen on counting size of project:%d , error:%v, just skip it.", project.ProjectID, err) + continue + } + afQuery := &models.ArtifactQuery{ + PID: project.ProjectID, + } + afs, err := dao.ListArtifacts(afQuery) + if err != nil { + logger.Warningf("error happen on counting number of project:%d , error:%v, just skip it.", project.ProjectID, err) + continue + } + pCount := int64(len(afs)) + + // it needs to append the chart count + if config.WithChartMuseum() { + count, err := chartController.GetCountOfCharts([]string{project.Name}) + if err != nil { + err = errors.Wrap(err, fmt.Sprintf("get chart count of project %d failed", project.ProjectID)) + logger.Error(err) + continue + } + pCount = pCount + int64(count) + } + + quotaMgr, err := common_quota.NewManager("project", strconv.FormatInt(project.ProjectID, 10)) + if err != nil { + logger.Errorf("Error occurred when to new quota manager %v, just skip it.", err) + continue + } + used := common_quota.ResourceList{ + common_quota.ResourceStorage: pSize, + common_quota.ResourceCount: pCount, + } + if err := quotaMgr.EnsureQuota(used); err != nil { + logger.Errorf("cannot ensure quota for the project: %d, err: %v, just skip it.", project.ProjectID, err) + continue + } + } + return nil +} + +// SyncQuota ... 
+func (ia *InternalAPI) SyncQuota() { + cur := config.ReadOnly() + cfgMgr := comcfg.NewDBCfgManager() + if cur != true { + cfgMgr.Set(common.ReadOnly, true) + } + // For api call, to avoid the timeout, it should be asynchronous + go func() { + defer func() { + if cur != true { + cfgMgr.Set(common.ReadOnly, false) + } + }() + log.Info("start to sync quota(API), the system will be set to ReadOnly and back it normal once it done.") + err := quota.Sync(ia.ProjectMgr, false) + if err != nil { + log.Errorf("fail to sync quota(API), but with error: %v, please try to do it again.", err) + return + } + log.Info("success to sync quota(API).") + }() + return +} diff --git a/src/core/api/internal_test.go b/src/core/api/internal_test.go new file mode 100644 index 000000000..02903a98b --- /dev/null +++ b/src/core/api/internal_test.go @@ -0,0 +1,89 @@ +// Copyright 2018 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "net/http" + "testing" +) + +// cannot verify the real scenario here +func TestSwitchQuota(t *testing.T) { + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/internal/switchquota", + }, + code: http.StatusUnauthorized, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/internal/switchquota", + credential: sysAdmin, + bodyJSON: &QuotaSwitcher{ + Enabled: true, + }, + }, + code: http.StatusOK, + }, + // 403 + { + request: &testingRequest{ + url: "/api/internal/switchquota", + method: http.MethodPut, + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + } + runCodeCheckingCases(t, cases...) +} + +// cannot verify the real scenario here +func TestSyncQuota(t *testing.T) { + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/internal/syncquota", + }, + code: http.StatusUnauthorized, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/internal/syncquota", + credential: sysAdmin, + }, + code: http.StatusOK, + }, + // 403 + { + request: &testingRequest{ + url: "/api/internal/syncquota", + method: http.MethodPost, + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + } + runCodeCheckingCases(t, cases...) +} diff --git a/src/core/api/notification_job.go b/src/core/api/notification_job.go new file mode 100755 index 000000000..775c9fc9f --- /dev/null +++ b/src/core/api/notification_job.go @@ -0,0 +1,108 @@ +package api + +import ( + "errors" + "fmt" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/pkg/notification" +) + +// NotificationJobAPI ... +type NotificationJobAPI struct { + BaseController + project *models.Project +} + +// Prepare ... 
+func (w *NotificationJobAPI) Prepare() { + w.BaseController.Prepare() + if !w.SecurityCtx.IsAuthenticated() { + w.SendUnAuthorizedError(errors.New("UnAuthorized")) + return + } + + pid, err := w.GetInt64FromPath(":pid") + if err != nil { + w.SendBadRequestError(fmt.Errorf("failed to get project ID: %v", err)) + return + } + if pid <= 0 { + w.SendBadRequestError(fmt.Errorf("invalid project ID: %d", pid)) + return + } + + project, err := w.ProjectMgr.Get(pid) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to get project %d: %v", pid, err)) + return + } + if project == nil { + w.SendNotFoundError(fmt.Errorf("project %d not found", pid)) + return + } + w.project = project +} + +// List ... +func (w *NotificationJobAPI) List() { + if !w.validateRBAC(rbac.ActionList, w.project.ProjectID) { + return + } + + policyID, err := w.GetInt64("policy_id") + if err != nil || policyID <= 0 { + w.SendBadRequestError(fmt.Errorf("invalid policy_id: %s", w.GetString("policy_id"))) + return + } + + policy, err := notification.PolicyMgr.Get(policyID) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to get policy %d: %v", policyID, err)) + return + } + if policy == nil { + w.SendBadRequestError(fmt.Errorf("policy %d not found", policyID)) + return + } + + query := &models.NotificationJobQuery{ + PolicyID: policyID, + } + + query.Statuses = w.GetStrings("status") + + query.Page, query.Size, err = w.GetPaginationParams() + if err != nil { + w.SendBadRequestError(err) + return + } + + total, jobs, err := notification.JobMgr.List(query) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to list notification jobs: %v", err)) + return + } + w.SetPaginationHeader(total, query.Page, query.Size) + w.WriteJSONData(jobs) +} + +func (w *NotificationJobAPI) validateRBAC(action rbac.Action, projectID int64) bool { + if w.SecurityCtx.IsSysAdmin() { + return true + } + + project, err := w.ProjectMgr.Get(projectID) + if err != nil { + 
w.ParseAndHandleError(fmt.Sprintf("failed to get project %d", projectID), err) + return false + } + + resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceNotificationPolicy) + if !w.SecurityCtx.Can(action, resource) { + w.SendForbiddenError(errors.New(w.SecurityCtx.GetUsername())) + return false + } + return true +} diff --git a/src/core/api/notification_job_test.go b/src/core/api/notification_job_test.go new file mode 100644 index 000000000..d6a9ac099 --- /dev/null +++ b/src/core/api/notification_job_test.go @@ -0,0 +1,107 @@ +package api + +import ( + "net/http" + "testing" + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/pkg/notification" + "github.com/goharbor/harbor/src/pkg/notification/model" +) + +type fakedNotificationJobMgr struct { +} + +func (f *fakedNotificationJobMgr) Create(job *models.NotificationJob) (int64, error) { + return 1, nil +} + +func (f *fakedNotificationJobMgr) List(...*models.NotificationJobQuery) (int64, []*models.NotificationJob, error) { + return 0, nil, nil +} + +func (f *fakedNotificationJobMgr) Update(job *models.NotificationJob, props ...string) error { + return nil +} + +func (f *fakedNotificationJobMgr) ListJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) { + return []*models.NotificationJob{ + { + EventType: model.EventTypePullImage, + CreationTime: time.Now(), + }, + { + EventType: model.EventTypeDeleteImage, + CreationTime: time.Now(), + }, + }, nil +} + +func TestNotificationJobAPI_List(t *testing.T) { + policyMgr := notification.PolicyMgr + jobMgr := notification.JobMgr + defer func() { + notification.PolicyMgr = policyMgr + notification.JobMgr = jobMgr + }() + notification.PolicyMgr = &fakedNotificationPlyMgr{} + notification.JobMgr = &fakedNotificationJobMgr{} + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/jobs?policy_id=1", + }, + code: 
http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/jobs?policy_id=1", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 400 policyID invalid + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/jobs?policy_id=0", + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + // 400 policyID not found + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/jobs?policy_id=123", + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + // 404 project not found + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/123/webhook/jobs?policy_id=1", + credential: sysAdmin, + }, + code: http.StatusNotFound, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/jobs?policy_id=1", + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) +} diff --git a/src/core/api/notification_policy.go b/src/core/api/notification_policy.go new file mode 100755 index 000000000..c7acdbea2 --- /dev/null +++ b/src/core/api/notification_policy.go @@ -0,0 +1,384 @@ +package api + +import ( + "errors" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/pkg/notification" +) + +// NotificationPolicyAPI ... +type NotificationPolicyAPI struct { + BaseController + project *models.Project +} + +// notificationPolicyForUI defines the structure of notification policy info display in UI +type notificationPolicyForUI struct { + EventType string `json:"event_type"` + Enabled bool `json:"enabled"` + CreationTime *time.Time `json:"creation_time"` + LastTriggerTime *time.Time `json:"last_trigger_time,omitempty"` +} + +// Prepare ... 
+func (w *NotificationPolicyAPI) Prepare() { + w.BaseController.Prepare() + if !w.SecurityCtx.IsAuthenticated() { + w.SendUnAuthorizedError(errors.New("UnAuthorized")) + return + } + + pid, err := w.GetInt64FromPath(":pid") + if err != nil { + w.SendBadRequestError(fmt.Errorf("failed to get project ID: %v", err)) + return + } + if pid <= 0 { + w.SendBadRequestError(fmt.Errorf("invalid project ID: %d", pid)) + return + } + + project, err := w.ProjectMgr.Get(pid) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to get project %d: %v", pid, err)) + return + } + if project == nil { + w.SendNotFoundError(fmt.Errorf("project %d not found", pid)) + return + } + w.project = project +} + +// Get ... +func (w *NotificationPolicyAPI) Get() { + if !w.validateRBAC(rbac.ActionRead, w.project.ProjectID) { + return + } + + id, err := w.GetIDFromURL() + if err != nil { + w.SendBadRequestError(err) + return + } + + policy, err := notification.PolicyMgr.Get(id) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to get the notification policy %d: %v", id, err)) + return + } + if policy == nil { + w.SendNotFoundError(fmt.Errorf("notification policy %d not found", id)) + return + } + + if w.project.ProjectID != policy.ProjectID { + w.SendBadRequestError(fmt.Errorf("notification policy %d with projectID %d not belong to project %d in URL", id, policy.ProjectID, w.project.ProjectID)) + return + } + + w.WriteJSONData(policy) +} + +// Post ... 
+func (w *NotificationPolicyAPI) Post() { + if !w.validateRBAC(rbac.ActionCreate, w.project.ProjectID) { + return + } + + policy := &models.NotificationPolicy{} + isValid, err := w.DecodeJSONReqAndValidate(policy) + if !isValid { + w.SendBadRequestError(err) + return + } + + if !w.validateTargets(policy) { + return + } + + if !w.validateEventTypes(policy) { + return + } + + if policy.ID != 0 { + w.SendBadRequestError(fmt.Errorf("cannot accept policy creating request with ID: %d", policy.ID)) + return + } + + policy.Creator = w.SecurityCtx.GetUsername() + policy.ProjectID = w.project.ProjectID + + id, err := notification.PolicyMgr.Create(policy) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to create the notification policy: %v", err)) + return + } + w.Redirect(http.StatusCreated, strconv.FormatInt(id, 10)) +} + +// Put ... +func (w *NotificationPolicyAPI) Put() { + if !w.validateRBAC(rbac.ActionUpdate, w.project.ProjectID) { + return + } + + id, err := w.GetIDFromURL() + if id < 0 || err != nil { + w.SendBadRequestError(errors.New("invalid notification policy ID")) + return + } + + oriPolicy, err := notification.PolicyMgr.Get(id) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to get the notification policy %d: %v", id, err)) + return + } + if oriPolicy == nil { + w.SendNotFoundError(fmt.Errorf("notification policy %d not found", id)) + return + } + + policy := &models.NotificationPolicy{} + isValid, err := w.DecodeJSONReqAndValidate(policy) + if !isValid { + w.SendBadRequestError(err) + return + } + + if !w.validateTargets(policy) { + return + } + + if !w.validateEventTypes(policy) { + return + } + + if w.project.ProjectID != oriPolicy.ProjectID { + w.SendBadRequestError(fmt.Errorf("notification policy %d with projectID %d not belong to project %d in URL", id, oriPolicy.ProjectID, w.project.ProjectID)) + return + } + + policy.ID = id + policy.ProjectID = w.project.ProjectID + + if err = notification.PolicyMgr.Update(policy); err 
!= nil { + w.SendInternalServerError(fmt.Errorf("failed to update the notification policy: %v", err)) + return + } +} + +// List ... +func (w *NotificationPolicyAPI) List() { + projectID := w.project.ProjectID + if !w.validateRBAC(rbac.ActionList, projectID) { + return + } + + res, err := notification.PolicyMgr.List(projectID) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to list notification policies by projectID %d: %v", projectID, err)) + return + } + + policies := []*models.NotificationPolicy{} + if res != nil { + for _, policy := range res { + policies = append(policies, policy) + } + } + + w.WriteJSONData(policies) +} + +// ListGroupByEventType lists notification policy trigger info grouped by event type for UI, +// displays event type, status(enabled/disabled), create time, last trigger time +func (w *NotificationPolicyAPI) ListGroupByEventType() { + projectID := w.project.ProjectID + if !w.validateRBAC(rbac.ActionList, projectID) { + return + } + + res, err := notification.PolicyMgr.List(projectID) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to list notification policies by projectID %d: %v", projectID, err)) + return + } + + policies, err := constructPolicyWithTriggerTime(res) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to list the notification policy trigger information: %v", err)) + return + } + w.WriteJSONData(policies) +} + +// Delete ... 
+func (w *NotificationPolicyAPI) Delete() { + projectID := w.project.ProjectID + if !w.validateRBAC(rbac.ActionDelete, projectID) { + return + } + + id, err := w.GetIDFromURL() + if id < 0 || err != nil { + w.SendBadRequestError(errors.New("invalid notification policy ID")) + return + } + + policy, err := notification.PolicyMgr.Get(id) + if err != nil { + w.SendInternalServerError(fmt.Errorf("failed to get the notification policy %d: %v", id, err)) + return + } + if policy == nil { + w.SendNotFoundError(fmt.Errorf("notification policy %d not found", id)) + return + } + + if projectID != policy.ProjectID { + w.SendBadRequestError(fmt.Errorf("notification policy %d with projectID %d not belong to project %d in URL", id, policy.ProjectID, projectID)) + return + } + + if err = notification.PolicyMgr.Delete(id); err != nil { + w.SendInternalServerError(fmt.Errorf("failed to delete notification policy %d: %v", id, err)) + return + } +} + +// Test ... +func (w *NotificationPolicyAPI) Test() { + projectID := w.project.ProjectID + if !w.validateRBAC(rbac.ActionCreate, projectID) { + return + } + + policy := &models.NotificationPolicy{} + isValid, err := w.DecodeJSONReqAndValidate(policy) + if !isValid { + w.SendBadRequestError(err) + return + } + + if !w.validateTargets(policy) { + return + } + + if err := notification.PolicyMgr.Test(policy); err != nil { + w.SendBadRequestError(fmt.Errorf("notification policy %s test failed: %v", policy.Name, err)) + return + } +} + +func (w *NotificationPolicyAPI) validateRBAC(action rbac.Action, projectID int64) bool { + if w.SecurityCtx.IsSysAdmin() { + return true + } + + project, err := w.ProjectMgr.Get(projectID) + if err != nil { + w.ParseAndHandleError(fmt.Sprintf("failed to get project %d", projectID), err) + return false + } + + resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceNotificationPolicy) + if !w.SecurityCtx.Can(action, resource) { + 
w.SendForbiddenError(errors.New(w.SecurityCtx.GetUsername())) + return false + } + return true +} + +func (w *NotificationPolicyAPI) validateTargets(policy *models.NotificationPolicy) bool { + if len(policy.Targets) == 0 { + w.SendBadRequestError(fmt.Errorf("empty notification target with policy %s", policy.Name)) + return false + } + + for _, target := range policy.Targets { + url, err := utils.ParseEndpoint(target.Address) + if err != nil { + w.SendBadRequestError(err) + return false + } + // Prevent SSRF security issue #3755 + target.Address = url.Scheme + "://" + url.Host + url.Path + + _, ok := notification.SupportedNotifyTypes[target.Type] + if !ok { + w.SendBadRequestError(fmt.Errorf("unsupport target type %s with policy %s", target.Type, policy.Name)) + return false + } + } + + return true +} + +func (w *NotificationPolicyAPI) validateEventTypes(policy *models.NotificationPolicy) bool { + if len(policy.EventTypes) == 0 { + w.SendBadRequestError(errors.New("empty event type")) + return false + } + + for _, eventType := range policy.EventTypes { + _, ok := notification.SupportedEventTypes[eventType] + if !ok { + w.SendBadRequestError(fmt.Errorf("unsupport event type %s", eventType)) + return false + } + } + + return true +} + +func getLastTriggerTimeGroupByEventType(eventType string, policyID int64) (time.Time, error) { + jobs, err := notification.JobMgr.ListJobsGroupByEventType(policyID) + if err != nil { + return time.Time{}, err + } + + for _, job := range jobs { + if eventType == job.EventType { + return job.CreationTime, nil + } + } + return time.Time{}, nil +} + +// constructPolicyWithTriggerTime construct notification policy information displayed in UI +// including event type, enabled, creation time, last trigger time +func constructPolicyWithTriggerTime(policies []*models.NotificationPolicy) ([]*notificationPolicyForUI, error) { + res := []*notificationPolicyForUI{} + if policies != nil { + for _, policy := range policies { + for _, t := range 
 policy.EventTypes { + ply := &notificationPolicyForUI{ + EventType: t, + Enabled: policy.Enabled, + CreationTime: &policy.CreationTime, + } + if !policy.CreationTime.IsZero() { + ply.CreationTime = &policy.CreationTime + } + + ltTime, err := getLastTriggerTimeGroupByEventType(t, policy.ID) + if err != nil { + return nil, err + } + if !ltTime.IsZero() { + ply.LastTriggerTime = &ltTime + } + res = append(res, ply) + } + } + } + return res, nil +} diff --git a/src/core/api/notification_policy_test.go b/src/core/api/notification_policy_test.go new file mode 100644 index 000000000..a63f6b72e --- /dev/null +++ b/src/core/api/notification_policy_test.go @@ -0,0 +1,637 @@ +package api + +import ( + "net/http" + "testing" + + "github.com/pkg/errors" + + "github.com/goharbor/harbor/src/pkg/notification/model" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/pkg/notification" +) + +type fakedNotificationPlyMgr struct { +} + +func (f *fakedNotificationPlyMgr) Create(*models.NotificationPolicy) (int64, error) { + return 0, nil +} + +func (f *fakedNotificationPlyMgr) List(id int64) ([]*models.NotificationPolicy, error) { + return []*models.NotificationPolicy{ + { + ID: 1, + EventTypes: []string{ + model.EventTypePullImage, + model.EventTypePushImage, + }, + }, + }, nil +} + +func (f *fakedNotificationPlyMgr) Get(id int64) (*models.NotificationPolicy, error) { + switch id { + case 1: + return &models.NotificationPolicy{ID: 1, ProjectID: 1}, nil + case 2: + return &models.NotificationPolicy{ID: 2, ProjectID: 222}, nil + case 3: + return nil, errors.New("") + default: + return nil, nil + } +} + +func (f *fakedNotificationPlyMgr) GetByNameAndProjectID(string, int64) (*models.NotificationPolicy, error) { + return nil, nil +} + +func (f *fakedNotificationPlyMgr) Update(*models.NotificationPolicy) error { + + return nil +} + +func (f *fakedNotificationPlyMgr) Delete(int64) error { + return nil +} + +func (f *fakedNotificationPlyMgr) 
Test(*models.NotificationPolicy) error { + return nil +} + +func (f *fakedNotificationPlyMgr) GetRelatedPolices(int64, string) ([]*models.NotificationPolicy, error) { + return nil, nil +} + +func TestNotificationPolicyAPI_List(t *testing.T) { + policyCtl := notification.PolicyMgr + defer func() { + notification.PolicyMgr = policyCtl + }() + + notification.PolicyMgr = &fakedNotificationPlyMgr{} + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies", + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 404 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/123/webhook/policies", + credential: sysAdmin, + }, + code: http.StatusNotFound, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies", + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) 
+ +} + +func TestNotificationPolicyAPI_Post(t *testing.T) { + policyCtl := notification.PolicyMgr + defer func() { + notification.PolicyMgr = policyCtl + }() + + notification.PolicyMgr = &fakedNotificationPlyMgr{} + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 400 invalid json body + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + credential: sysAdmin, + bodyJSON: "invalid json body", + }, + code: http.StatusBadRequest, + }, + // 400 empty targets + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + Targets: []models.EventTarget{}, + }}, + code: http.StatusBadRequest, + }, + // 400 invalid event target address + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: []models.EventTarget{ + { + Address: "tcp://127.0.0.1:8080", + }, + }, + }}, + code: http.StatusBadRequest, + }, + // 400 invalid event target type + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: []models.EventTarget{ + { + Type: "smn", + Address: "http://127.0.0.1:8080", + }, + }, + }}, + code: http.StatusBadRequest, + }, + // 400 invalid event type + { + request: &testingRequest{ + method: http.MethodPost, + url: 
"/api/projects/1/webhook/policies", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"invalidType"}, + Targets: []models.EventTarget{ + { + Address: "tcp://127.0.0.1:8080", + }, + }, + }}, + code: http.StatusBadRequest, + }, + // 400 policy ID != 0 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + ID: 111, + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: []models.EventTarget{ + { + Type: "http", + Address: "http://10.173.32.58:9009", + AuthHeader: "xxxxxxxxx", + SkipCertVerify: true, + }, + }, + }, + }, + code: http.StatusBadRequest, + }, + // 201 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: []models.EventTarget{ + { + Type: "http", + Address: "http://10.173.32.58:9009", + AuthHeader: "xxxxxxxxx", + SkipCertVerify: true, + }, + }, + }, + }, + code: http.StatusCreated, + }, + } + runCodeCheckingCases(t, cases...) 
+} + +func TestNotificationPolicyAPI_Get(t *testing.T) { + policyCtl := notification.PolicyMgr + defer func() { + notification.PolicyMgr = policyCtl + }() + + notification.PolicyMgr = &fakedNotificationPlyMgr{} + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies/111", + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies/111", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 404 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies/1234", + credential: sysAdmin, + }, + code: http.StatusNotFound, + }, + // 400 projectID not match with projectID in URL + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies/2", + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + // 500 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies/3", + credential: sysAdmin, + }, + code: http.StatusInternalServerError, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) 
+} + +func TestNotificationPolicyAPI_Put(t *testing.T) { + policyCtl := notification.PolicyMgr + defer func() { + notification.PolicyMgr = policyCtl + }() + + notification.PolicyMgr = &fakedNotificationPlyMgr{} + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/111", + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/111", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 404 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/1234", + credential: sysAdmin, + }, + code: http.StatusNotFound, + }, + // 400 invalid json body + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + bodyJSON: "invalidJSONBody", + }, + code: http.StatusBadRequest, + }, + // 400 empty targets + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: []models.EventTarget{}, + }}, + code: http.StatusBadRequest, + }, + // 400 invalid event target address + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: []models.EventTarget{ + { + Address: "tcp://127.0.0.1:8080", + }, + }, + }}, + code: http.StatusBadRequest, + }, + // 400 invalid event target type + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: 
[]models.EventTarget{ + { + Type: "smn", + Address: "http://127.0.0.1:8080", + }, + }, + }}, + code: http.StatusBadRequest, + }, + // 400 invalid event type + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + EventTypes: []string{"invalidType"}, + Targets: []models.EventTarget{ + { + Address: "tcp://127.0.0.1:8080", + }, + }, + }}, + code: http.StatusBadRequest, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodPut, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + Name: "imagePolicyTest", + EventTypes: []string{"pullImage", "pushImage", "deleteImage"}, + Targets: []models.EventTarget{ + { + Type: "http", + Address: "http://10.173.32.58:9009", + AuthHeader: "xxxxxxxxx", + SkipCertVerify: true, + }, + }, + }, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) +} + +func TestNotificationPolicyAPI_Test(t *testing.T) { + policyCtl := notification.PolicyMgr + defer func() { + notification.PolicyMgr = policyCtl + }() + + notification.PolicyMgr = &fakedNotificationPlyMgr{} + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies/test", + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies/test", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 404 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/123/webhook/policies/test", + credential: sysAdmin, + }, + code: http.StatusNotFound, + }, + // 400 invalid json body + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies/test", + credential: sysAdmin, + bodyJSON: 1234125, + }, + code: http.StatusBadRequest, + }, + // 200 + { + request: 
&testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/webhook/policies/test", + credential: sysAdmin, + bodyJSON: &models.NotificationPolicy{ + Targets: []models.EventTarget{ + { + Type: "http", + Address: "http://10.173.32.58:9009", + AuthHeader: "xxxxxxxxx", + SkipCertVerify: true, + }, + }, + }, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) +} + +func TestNotificationPolicyAPI_ListGroupByEventType(t *testing.T) { + policyCtl := notification.PolicyMgr + jobMgr := notification.JobMgr + defer func() { + notification.PolicyMgr = policyCtl + notification.JobMgr = jobMgr + }() + + notification.PolicyMgr = &fakedNotificationPlyMgr{} + notification.JobMgr = &fakedNotificationJobMgr{} + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/lasttrigger", + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/lasttrigger", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 404 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/123/webhook/lasttrigger", + credential: sysAdmin, + }, + code: http.StatusNotFound, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/projects/1/webhook/lasttrigger", + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) 
+} + +func TestNotificationPolicyAPI_Delete(t *testing.T) { + policyCtl := notification.PolicyMgr + defer func() { + notification.PolicyMgr = policyCtl + }() + + notification.PolicyMgr = &fakedNotificationPlyMgr{} + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodDelete, + url: "/api/projects/1/webhook/policies/111", + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodDelete, + url: "/api/projects/1/webhook/policies/111", + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 404 + { + request: &testingRequest{ + method: http.MethodDelete, + url: "/api/projects/1/webhook/policies/1234", + credential: sysAdmin, + }, + code: http.StatusNotFound, + }, + // 400 projectID not match + { + request: &testingRequest{ + method: http.MethodDelete, + url: "/api/projects/1/webhook/policies/2", + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + // 500 failed to get policy + { + request: &testingRequest{ + method: http.MethodDelete, + url: "/api/projects/1/webhook/policies/3", + credential: sysAdmin, + }, + code: http.StatusInternalServerError, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodDelete, + url: "/api/projects/1/webhook/policies/1", + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) +} diff --git a/src/core/api/oidc.go b/src/core/api/oidc.go new file mode 100644 index 000000000..ed4688cf8 --- /dev/null +++ b/src/core/api/oidc.go @@ -0,0 +1,56 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "errors" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/common/utils/oidc" +) + +// OIDCAPI handles the requests to /api/system/oidc/xxx +type OIDCAPI struct { + BaseController +} + +// Prepare validates the request initially +func (oa *OIDCAPI) Prepare() { + oa.BaseController.Prepare() + if !oa.SecurityCtx.IsAuthenticated() { + oa.SendUnAuthorizedError(errors.New("unauthorized")) + return + } + if !oa.SecurityCtx.IsSysAdmin() { + msg := "only system admin has permission to access this API" + log.Errorf(msg) + oa.SendForbiddenError(errors.New(msg)) + return + } +} + +// Ping handles the request to test connection to OIDC endpoint +func (oa *OIDCAPI) Ping() { + var c oidc.Conn + if err := oa.DecodeJSONReq(&c); err != nil { + log.Error("Failed to decode JSON request.") + oa.SendBadRequestError(err) + return + } + if err := oidc.TestEndpoint(c); err != nil { + log.Errorf("Failed to verify connection: %+v, err: %v", c, err) + oa.SendBadRequestError(err) + return + } +} diff --git a/src/core/api/oidc_test.go b/src/core/api/oidc_test.go new file mode 100644 index 000000000..ec9ada990 --- /dev/null +++ b/src/core/api/oidc_test.go @@ -0,0 +1,69 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "github.com/goharbor/harbor/src/common/utils/oidc" + "net/http" + "testing" +) + +func TestOIDCAPI_Ping(t *testing.T) { + url := "/api/system/oidc/ping" + cases := []*codeCheckingCase{ + { // 401 + request: &testingRequest{ + method: http.MethodPost, + bodyJSON: oidc.Conn{}, + url: url, + }, + code: http.StatusUnauthorized, + }, + { // 403 + request: &testingRequest{ + method: http.MethodPost, + bodyJSON: oidc.Conn{}, + url: url, + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + { // 400 + request: &testingRequest{ + method: http.MethodPost, + bodyJSON: oidc.Conn{ + URL: "https://www.baidu.com", + VerifyCert: true, + }, + url: url, + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + { // 200 + request: &testingRequest{ + method: http.MethodPost, + bodyJSON: oidc.Conn{ + URL: "https://accounts.google.com", + VerifyCert: true, + }, + url: url, + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) 
+} diff --git a/src/core/api/project.go b/src/core/api/project.go index aaf0f2e02..1c98242ca 100644 --- a/src/core/api/project.go +++ b/src/core/api/project.go @@ -18,19 +18,22 @@ import ( "fmt" "net/http" "regexp" + "strconv" + "sync" + "time" "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/dao/project" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota" "github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/common/utils" errutil "github.com/goharbor/harbor/src/common/utils/error" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/config" - - "errors" - "strconv" - "time" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/pkg/errors" ) type deletableResp struct { @@ -128,6 +131,7 @@ func (p *ProjectAPI) Post() { p.SendBadRequestError(err) return } + err = validateProjectReq(pro) if err != nil { log.Errorf("Invalid project request, error: %v", err) @@ -135,6 +139,28 @@ func (p *ProjectAPI) Post() { return } + var hardLimits types.ResourceList + if config.QuotaPerProjectEnable() { + setting, err := config.QuotaSetting() + if err != nil { + log.Errorf("failed to get quota setting: %v", err) + p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err)) + return + } + + if !p.SecurityCtx.IsSysAdmin() { + pro.CountLimit = &setting.CountPerProject + pro.StorageLimit = &setting.StoragePerProject + } + + hardLimits, err = projectQuotaHardLimits(pro, setting) + if err != nil { + log.Errorf("Invalid project request, error: %v", err) + p.SendBadRequestError(fmt.Errorf("invalid request: %v", err)) + return + } + } + exist, err := p.ProjectMgr.Exists(pro.Name) if err != nil { p.ParseAndHandleError(fmt.Sprintf("failed to check the existence of project %s", @@ -158,6 +184,7 @@ func (p *ProjectAPI) Post() { if _, ok := pro.Metadata[models.ProMetaPublic]; !ok { 
pro.Metadata[models.ProMetaPublic] = strconv.FormatBool(false) } + // populate owner := p.SecurityCtx.GetUsername() // set the owner as the system admin when the API being called by replication @@ -188,6 +215,18 @@ func (p *ProjectAPI) Post() { return } + if config.QuotaPerProjectEnable() { + quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10)) + if err != nil { + p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err)) + return + } + if _, err := quotaMgr.NewQuota(hardLimits); err != nil { + p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err)) + return + } + } + go func() { if err = dao.AddAccessLog( models.AccessLog{ @@ -231,7 +270,10 @@ func (p *ProjectAPI) Get() { return } - p.populateProperties(p.project) + err := p.populateProperties(p.project) + if err != nil { + log.Errorf("populate project properties failed with : %+v", err) + } p.Data["json"] = p.project p.ServeJSON() @@ -259,6 +301,16 @@ func (p *ProjectAPI) Delete() { return } + quotaMgr, err := quota.NewManager("project", strconv.FormatInt(p.project.ProjectID, 10)) + if err != nil { + p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err)) + return + } + if err := quotaMgr.DeleteQuota(); err != nil { + p.SendInternalServerError(fmt.Errorf("failed to delete quota for project: %v", err)) + return + } + go func() { if err := dao.AddAccessLog(models.AccessLog{ Username: p.SecurityCtx.GetUsername(), @@ -401,15 +453,17 @@ func (p *ProjectAPI) List() { } for _, project := range result.Projects { - p.populateProperties(project) + err = p.populateProperties(project) + if err != nil { + log.Errorf("populate project properties failed %v", err) + } } - p.SetPaginationHeader(result.Total, page, size) p.Data["json"] = result.Projects p.ServeJSON() } -func (p *ProjectAPI) populateProperties(project *models.Project) { +func (p *ProjectAPI) populateProperties(project *models.Project) error { if 
p.SecurityCtx.IsAuthenticated() { roles := p.SecurityCtx.GetProjectRoles(project.ProjectID) if len(roles) != 0 { @@ -426,9 +480,8 @@ func (p *ProjectAPI) populateProperties(project *models.Project) { ProjectIDs: []int64{project.ProjectID}, }) if err != nil { - log.Errorf("failed to get total of repositories of project %d: %v", project.ProjectID, err) - p.SendInternalServerError(errors.New("")) - return + err = errors.Wrap(err, fmt.Sprintf("get repo count of project %d failed", project.ProjectID)) + return err } project.RepoCount = total @@ -437,13 +490,13 @@ func (p *ProjectAPI) populateProperties(project *models.Project) { if config.WithChartMuseum() { count, err := chartController.GetCountOfCharts([]string{project.Name}) if err != nil { - log.Errorf("Failed to get total of charts under project %s: %v", project.Name, err) - p.SendInternalServerError(errors.New("")) - return + err = errors.Wrap(err, fmt.Sprintf("get chart count of project %d failed", project.ProjectID)) + return err } project.ChartCount = count } + return nil } // Put ... 
@@ -460,7 +513,8 @@ func (p *ProjectAPI) Put() { if err := p.ProjectMgr.Update(p.project.ProjectID, &models.Project{ - Metadata: req.Metadata, + Metadata: req.Metadata, + CVEWhitelist: req.CVEWhitelist, }); err != nil { p.ParseAndHandleError(fmt.Sprintf("failed to update project %d", p.project.ProjectID), err) @@ -530,6 +584,37 @@ func (p *ProjectAPI) Logs() { p.ServeJSON() } +// Summary returns the summary of the project +func (p *ProjectAPI) Summary() { + if !p.requireAccess(rbac.ActionRead) { + return + } + + if err := p.populateProperties(p.project); err != nil { + log.Warningf("populate project properties failed with : %+v", err) + } + + summary := &models.ProjectSummary{ + RepoCount: p.project.RepoCount, + ChartCount: p.project.ChartCount, + } + + var wg sync.WaitGroup + for _, fn := range []func(int64, *models.ProjectSummary){getProjectQuotaSummary, getProjectMemberSummary} { + fn := fn + + wg.Add(1) + go func() { + defer wg.Done() + fn(p.project.ProjectID, summary) + }() + } + wg.Wait() + + p.Data["json"] = summary + p.ServeJSON() +} + // TODO move this to package models func validateProjectReq(req *models.ProjectRequest) error { pn := req.Name @@ -550,3 +635,76 @@ func validateProjectReq(req *models.ProjectRequest) error { req.Metadata = metas return nil } + +func projectQuotaHardLimits(req *models.ProjectRequest, setting *models.QuotaSetting) (types.ResourceList, error) { + hardLimits := types.ResourceList{} + if req.CountLimit != nil { + hardLimits[types.ResourceCount] = *req.CountLimit + } else { + hardLimits[types.ResourceCount] = setting.CountPerProject + } + + if req.StorageLimit != nil { + hardLimits[types.ResourceStorage] = *req.StorageLimit + } else { + hardLimits[types.ResourceStorage] = setting.StoragePerProject + } + + if err := quota.Validate("project", hardLimits); err != nil { + return nil, err + } + + return hardLimits, nil +} + +func getProjectQuotaSummary(projectID int64, summary *models.ProjectSummary) { + if 
!config.QuotaPerProjectEnable() { + log.Debug("Quota per project disabled") + return + } + + quotas, err := dao.ListQuotas(&models.QuotaQuery{Reference: "project", ReferenceID: strconv.FormatInt(projectID, 10)}) + if err != nil { + log.Debugf("failed to get quota for project: %d", projectID) + return + } + + if len(quotas) == 0 { + log.Debugf("quota not found for project: %d", projectID) + return + } + + quota := quotas[0] + + summary.Quota.Hard, _ = types.NewResourceList(quota.Hard) + summary.Quota.Used, _ = types.NewResourceList(quota.Used) +} + +func getProjectMemberSummary(projectID int64, summary *models.ProjectSummary) { + var wg sync.WaitGroup + + for _, e := range []struct { + role int + count *int64 + }{ + {common.RoleProjectAdmin, &summary.ProjectAdminCount}, + {common.RoleMaster, &summary.MasterCount}, + {common.RoleDeveloper, &summary.DeveloperCount}, + {common.RoleGuest, &summary.GuestCount}, + } { + wg.Add(1) + go func(role int, count *int64) { + defer wg.Done() + + total, err := project.GetTotalOfProjectMembers(projectID, role) + if err != nil { + log.Debugf("failed to get total of project members of role %d", role) + return + } + + *count = total + }(e.role, e.count) + } + + wg.Wait() +} diff --git a/src/core/api/project_test.go b/src/core/api/project_test.go index 8f42ed35d..2c2d3d8fe 100644 --- a/src/core/api/project_test.go +++ b/src/core/api/project_test.go @@ -22,7 +22,7 @@ import ( "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/tests/apitests/apilib" + "github.com/goharbor/harbor/src/testing/apitests/apilib" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -30,6 +30,42 @@ import ( var addProject *apilib.ProjectReq var addPID int +func addProjectByName(apiTest *testapi, projectName string) (int32, error) { + req := apilib.ProjectReq{ProjectName: projectName} + code, err := apiTest.ProjectsPost(*admin, req) + if err != nil { + return 0, err + 
} + if code != http.StatusCreated { + return 0, fmt.Errorf("created failed") + } + + code, projects, err := apiTest.ProjectsGet(&apilib.ProjectQuery{Name: projectName}, *admin) + if err != nil { + return 0, err + } + if code != http.StatusOK { + return 0, fmt.Errorf("get failed") + } + + if len(projects) == 0 { + return 0, fmt.Errorf("oops") + } + + return projects[0].ProjectId, nil +} + +func deleteProjectByIDs(apiTest *testapi, projectIDs ...int32) error { + for _, projectID := range projectIDs { + _, err := apiTest.ProjectsDelete(*admin, fmt.Sprintf("%d", projectID)) + if err != nil { + return err + } + } + + return nil +} + func InitAddPro() { addProject = &apilib.ProjectReq{ProjectName: "add_project", Metadata: map[string]string{models.ProMetaPublic: "true"}} } @@ -90,6 +126,31 @@ func TestAddProject(t *testing.T) { assert.Equal(int(400), result, "case 4 : response code = 400 : Project name is illegal in length ") } + // case 5: response code = 201 : expect project creation with quota success. 
+ fmt.Println("case 5 : response code = 201 : expect project creation with quota success ") + + var countLimit, storageLimit int64 + countLimit, storageLimit = 100, 10 + result, err = apiTest.ProjectsPost(*admin, apilib.ProjectReq{ProjectName: "with_quota", CountLimit: &countLimit, StorageLimit: &storageLimit}) + if err != nil { + t.Error("Error while creat project", err.Error()) + t.Log(err) + } else { + assert.Equal(int(201), result, "case 5 : response code = 201 : expect project creation with quota success ") + } + + // case 6: response code = 400 : bad quota value, create project fail + fmt.Println("case 6: response code = 400 : bad quota value, create project fail") + + countLimit, storageLimit = 100, -2 + result, err = apiTest.ProjectsPost(*admin, apilib.ProjectReq{ProjectName: "with_quota", CountLimit: &countLimit, StorageLimit: &storageLimit}) + if err != nil { + t.Error("Error while creat project", err.Error()) + t.Log(err) + } else { + assert.Equal(int(400), result, "case 6: response code = 400 : bad quota value, create project fail") + } + fmt.Printf("\n") } @@ -111,7 +172,7 @@ func TestListProjects(t *testing.T) { }() // ----------------------------case 1 : Response Code=200----------------------------// - fmt.Println("case 1: respose code:200") + fmt.Println("case 1: response code:200") httpStatusCode, result, err := apiTest.ProjectsGet( &apilib.ProjectQuery{ Name: addProject.ProjectName, @@ -202,7 +263,7 @@ func TestProGetByID(t *testing.T) { }() // ----------------------------case 1 : Response Code=200----------------------------// - fmt.Println("case 1: respose code:200") + fmt.Println("case 1: response code:200") httpStatusCode, result, err := apiTest.ProjectsGetByPID(projectID) if err != nil { t.Error("Error while search project by proID", err.Error()) @@ -230,17 +291,17 @@ func TestDeleteProject(t *testing.T) { t.Error("Error while delete project", err.Error()) t.Log(err) } else { - assert.Equal(int(401), httpStatusCode, "Case 1: Project creation 
status should be 401") + assert.Equal(int(401), httpStatusCode, "Case 1: Project deletion status should be 401") } // --------------------------case 2: Response Code=200---------------------------------// - fmt.Println("case2: respose code:200") + fmt.Println("case2: response code:200") httpStatusCode, err = apiTest.ProjectsDelete(*admin, projectID) if err != nil { t.Error("Error while delete project", err.Error()) t.Log(err) } else { - assert.Equal(int(200), httpStatusCode, "Case 2: Project creation status should be 200") + assert.Equal(int(200), httpStatusCode, "Case 2: Project deletion status should be 200") } // --------------------------case 3: Response Code=404,Project does not exist---------------------------------// @@ -251,7 +312,7 @@ func TestDeleteProject(t *testing.T) { t.Error("Error while delete project", err.Error()) t.Log(err) } else { - assert.Equal(int(404), httpStatusCode, "Case 3: Project creation status should be 404") + assert.Equal(int(404), httpStatusCode, "Case 3: Project deletion status should be 404") } // --------------------------case 4: Response Code=400,Invalid project id.---------------------------------// @@ -262,7 +323,7 @@ func TestDeleteProject(t *testing.T) { t.Error("Error while delete project", err.Error()) t.Log(err) } else { - assert.Equal(int(400), httpStatusCode, "Case 4: Project creation status should be 400") + assert.Equal(int(400), httpStatusCode, "Case 4: Project deletion status should be 400") } fmt.Printf("\n") @@ -274,7 +335,7 @@ func TestProHead(t *testing.T) { apiTest := newHarborAPI() // ----------------------------case 1 : Response Code=200----------------------------// - fmt.Println("case 1: respose code:200") + fmt.Println("case 1: response code:200") httpStatusCode, err := apiTest.ProjectsHead(*admin, "library") if err != nil { t.Error("Error while search project by proName", err.Error()) @@ -284,7 +345,7 @@ func TestProHead(t *testing.T) { } // ----------------------------case 2 : Response Code=404:Project 
name does not exist.----------------------------// - fmt.Println("case 2: respose code:404,Project name does not exist.") + fmt.Println("case 2: response code:404,Project name does not exist.") httpStatusCode, err = apiTest.ProjectsHead(*admin, "libra") if err != nil { t.Error("Error while search project by proName", err.Error()) @@ -308,22 +369,22 @@ func TestPut(t *testing.T) { }, } - fmt.Println("case 1: respose code:200") + fmt.Println("case 1: response code:200") code, err := apiTest.ProjectsPut(*admin, "1", project) require.Nil(t, err) assert.Equal(int(200), code) - fmt.Println("case 2: respose code:401, User need to log in first.") + fmt.Println("case 2: response code:401, User need to log in first.") code, err = apiTest.ProjectsPut(*unknownUsr, "1", project) require.Nil(t, err) assert.Equal(int(401), code) - fmt.Println("case 3: respose code:400, Invalid project id") + fmt.Println("case 3: response code:400, Invalid project id") code, err = apiTest.ProjectsPut(*admin, "cc", project) require.Nil(t, err) assert.Equal(int(400), code) - fmt.Println("case 4: respose code:404, Not found the project") + fmt.Println("case 4: response code:404, Not found the project") code, err = apiTest.ProjectsPut(*admin, "1234", project) require.Nil(t, err) assert.Equal(int(404), code) @@ -346,7 +407,7 @@ func TestProjectLogsFilter(t *testing.T) { } // -------------------case1: Response Code=200------------------------------// - fmt.Println("case 1: respose code:200") + fmt.Println("case 1: response code:200") projectID := "1" httpStatusCode, _, err := apiTest.ProjectLogs(*admin, projectID, query) if err != nil { @@ -356,7 +417,7 @@ func TestProjectLogsFilter(t *testing.T) { assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200") } // -------------------case2: Response Code=401:User need to log in first.------------------------------// - fmt.Println("case 2: respose code:401:User need to log in first.") + fmt.Println("case 2: response code:401:User need to log in 
first.") projectID = "1" httpStatusCode, _, err = apiTest.ProjectLogs(*unknownUsr, projectID, query) if err != nil { @@ -366,7 +427,7 @@ func TestProjectLogsFilter(t *testing.T) { assert.Equal(int(401), httpStatusCode, "httpStatusCode should be 401") } // -------------------case3: Response Code=404:Project does not exist.-------------------------// - fmt.Println("case 3: respose code:404:Illegal format of provided ID value.") + fmt.Println("case 3: response code:404:Illegal format of provided ID value.") projectID = "11111" httpStatusCode, _, err = apiTest.ProjectLogs(*admin, projectID, query) if err != nil { @@ -423,3 +484,30 @@ func TestDeletable(t *testing.T) { assert.Equal(t, http.StatusOK, code) assert.False(t, del) } + +func TestProjectSummary(t *testing.T) { + fmt.Println("\nTest for Project Summary API") + assert := assert.New(t) + + apiTest := newHarborAPI() + + projectID, err := addProjectByName(apiTest, "project-summary") + assert.Nil(err) + defer func() { + deleteProjectByIDs(apiTest, projectID) + }() + + // ----------------------------case 1 : Response Code=200----------------------------// + fmt.Println("case 1: response code:200") + httpStatusCode, summary, err := apiTest.ProjectSummary(*admin, fmt.Sprintf("%d", projectID)) + if err != nil { + t.Error("Error while search project by proName", err.Error()) + t.Log(err) + } else { + assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200") + assert.Equal(int64(1), summary.ProjectAdminCount) + assert.Equal(map[string]int64{"count": -1, "storage": -1}, summary.Quota.Hard) + } + + fmt.Printf("\n") +} diff --git a/src/core/api/projectmember.go b/src/core/api/projectmember.go index 0ede80dd4..5495ac26a 100644 --- a/src/core/api/projectmember.go +++ b/src/core/api/projectmember.go @@ -23,11 +23,13 @@ import ( "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/dao/project" 
"github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/auth" + "github.com/goharbor/harbor/src/core/config" ) // ProjectMemberAPI handles request to /api/projects/{}/members/{} @@ -37,6 +39,7 @@ type ProjectMemberAPI struct { entityID int entityType string project *models.Project + groupType int } // ErrDuplicateProjectMember ... @@ -84,6 +87,15 @@ func (pma *ProjectMemberAPI) Prepare() { return } pma.id = int(pmid) + authMode, err := config.AuthMode() + if err != nil { + pma.SendInternalServerError(fmt.Errorf("failed to get authentication mode")) + } + if authMode == common.LDAPAuth { + pma.groupType = common.LDAPGroupType + } else if authMode == common.HTTPAuth { + pma.groupType = common.HTTPGroupType + } } func (pma *ProjectMemberAPI) requireAccess(action rbac.Action) bool { @@ -131,7 +143,7 @@ func (pma *ProjectMemberAPI) Get() { return } if len(memberList) == 0 { - pma.SendNotFoundError(fmt.Errorf("The project member does not exit, pmid:%v", pma.id)) + pma.SendNotFoundError(fmt.Errorf("The project member does not exist, pmid:%v", pma.id)) return } @@ -161,10 +173,10 @@ func (pma *ProjectMemberAPI) Post() { pma.SendBadRequestError(fmt.Errorf("Failed to add project member, error: %v", err)) return } else if err == auth.ErrDuplicateLDAPGroup { - pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist LDAP group or project member, groupDN:%v", request.MemberGroup.LdapGroupDN)) + pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist group or project member, groupDN:%v", request.MemberGroup.LdapGroupDN)) return } else if err == ErrDuplicateProjectMember { - pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist LDAP group or project member, groupMemberID:%v", request.MemberGroup.ID)) + pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist group or 
project member, groupMemberID:%v", request.MemberGroup.ID)) return } else if err == ErrInvalidRole { pma.SendBadRequestError(fmt.Errorf("Invalid role ID, role ID %v", request.Role)) @@ -220,12 +232,13 @@ func AddProjectMember(projectID int64, request models.MemberReq) (int, error) { var member models.Member member.ProjectID = projectID member.Role = request.Role + member.EntityType = common.GroupMember + if request.MemberUser.UserID > 0 { member.EntityID = request.MemberUser.UserID member.EntityType = common.UserMember } else if request.MemberGroup.ID > 0 { member.EntityID = request.MemberGroup.ID - member.EntityType = common.GroupMember } else if len(request.MemberUser.Username) > 0 { var userID int member.EntityType = common.UserMember @@ -243,14 +256,28 @@ func AddProjectMember(projectID int64, request models.MemberReq) (int, error) { } member.EntityID = userID } else if len(request.MemberGroup.LdapGroupDN) > 0 { - + request.MemberGroup.GroupType = common.LDAPGroupType // If groupname provided, use the provided groupname to name this group groupID, err := auth.SearchAndOnBoardGroup(request.MemberGroup.LdapGroupDN, request.MemberGroup.GroupName) if err != nil { return 0, err } member.EntityID = groupID - member.EntityType = common.GroupMember + } else if len(request.MemberGroup.GroupName) > 0 && request.MemberGroup.GroupType == common.HTTPGroupType { + ugs, err := group.QueryUserGroup(models.UserGroup{GroupName: request.MemberGroup.GroupName, GroupType: common.HTTPGroupType}) + if err != nil { + return 0, err + } + if len(ugs) == 0 { + groupID, err := auth.SearchAndOnBoardGroup(request.MemberGroup.GroupName, "") + if err != nil { + return 0, err + } + member.EntityID = groupID + } else { + member.EntityID = ugs[0].ID + } + } if member.EntityID <= 0 { return 0, fmt.Errorf("Can not get valid member entity, request: %+v", request) diff --git a/src/core/api/projectmember_test.go b/src/core/api/projectmember_test.go index 6cbef32ea..88e47851f 100644 --- 
a/src/core/api/projectmember_test.go +++ b/src/core/api/projectmember_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/dao/project" "github.com/goharbor/harbor/src/common/models" ) @@ -52,6 +53,15 @@ func TestProjectMemberAPI_Get(t *testing.T) { }, code: http.StatusBadRequest, }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: fmt.Sprintf("/api/projects/1/members/%d", projAdminPMID), + credential: admin, + }, + code: http.StatusOK, + }, // 404 { request: &testingRequest{ @@ -85,6 +95,21 @@ func TestProjectMemberAPI_Post(t *testing.T) { t.Errorf("Error occurred when create user: %v", err) } + ugList, err := group.QueryUserGroup(models.UserGroup{GroupType: 1, LdapGroupDN: "cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com"}) + if err != nil { + t.Errorf("Failed to query the user group") + } + if len(ugList) <= 0 { + t.Errorf("Failed to query the user group") + } + httpUgList, err := group.QueryUserGroup(models.UserGroup{GroupType: 2, GroupName: "vsphere.local\\administrators"}) + if err != nil { + t.Errorf("Failed to query the user group") + } + if len(httpUgList) <= 0 { + t.Errorf("Failed to query the user group") + } + cases := []*codeCheckingCase{ // 401 { @@ -158,6 +183,66 @@ func TestProjectMemberAPI_Post(t *testing.T) { }, code: http.StatusOK, }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/members", + credential: admin, + bodyJSON: &models.MemberReq{ + Role: 1, + MemberGroup: models.UserGroup{ + GroupType: 1, + LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com", + }, + }, + }, + code: http.StatusBadRequest, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/members", + credential: admin, + bodyJSON: &models.MemberReq{ + Role: 1, + MemberGroup: models.UserGroup{ + GroupType: 2, + ID: httpUgList[0].ID, + }, + }, + }, + 
code: http.StatusCreated, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/members", + credential: admin, + bodyJSON: &models.MemberReq{ + Role: 1, + MemberGroup: models.UserGroup{ + GroupType: 1, + ID: ugList[0].ID, + }, + }, + }, + code: http.StatusCreated, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/projects/1/members", + credential: admin, + bodyJSON: &models.MemberReq{ + Role: 1, + MemberGroup: models.UserGroup{ + GroupType: 2, + GroupName: "vsphere.local/users", + }, + }, + }, + code: http.StatusBadRequest, + }, } runCodeCheckingCases(t, cases...) } diff --git a/src/core/api/quota.go b/src/core/api/quota.go new file mode 100644 index 000000000..eb55a6df3 --- /dev/null +++ b/src/core/api/quota.go @@ -0,0 +1,155 @@ +// Copyright 2018 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "fmt" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota" + "github.com/pkg/errors" +) + +// QuotaAPI handles request to /api/quotas/ +type QuotaAPI struct { + BaseController + quota *models.Quota +} + +// Prepare validates the URL and the user +func (qa *QuotaAPI) Prepare() { + qa.BaseController.Prepare() + + if !qa.SecurityCtx.IsAuthenticated() { + qa.SendUnAuthorizedError(errors.New("Unauthorized")) + return + } + + if !qa.SecurityCtx.IsSysAdmin() { + qa.SendForbiddenError(errors.New(qa.SecurityCtx.GetUsername())) + return + } + + if len(qa.GetStringFromPath(":id")) != 0 { + id, err := qa.GetInt64FromPath(":id") + if err != nil || id <= 0 { + text := "invalid quota ID: " + if err != nil { + text += err.Error() + } else { + text += fmt.Sprintf("%d", id) + } + qa.SendBadRequestError(errors.New(text)) + return + } + + quota, err := dao.GetQuota(id) + if err != nil { + qa.SendInternalServerError(fmt.Errorf("failed to get quota %d, error: %v", id, err)) + return + } + + if quota == nil { + qa.SendNotFoundError(fmt.Errorf("quota %d not found", id)) + return + } + + qa.quota = quota + } +} + +// Get returns quota by id +func (qa *QuotaAPI) Get() { + query := &models.QuotaQuery{ + ID: qa.quota.ID, + } + + quotas, err := dao.ListQuotas(query) + if err != nil { + qa.SendInternalServerError(fmt.Errorf("failed to get quota %d, error: %v", qa.quota.ID, err)) + return + } + + if len(quotas) == 0 { + qa.SendNotFoundError(fmt.Errorf("quota %d not found", qa.quota.ID)) + return + } + + qa.Data["json"] = quotas[0] + qa.ServeJSON() +} + +// Put update the quota +func (qa *QuotaAPI) Put() { + var req *models.QuotaUpdateRequest + if err := qa.DecodeJSONReq(&req); err != nil { + qa.SendBadRequestError(err) + return + } + + if err := quota.Validate(qa.quota.Reference, req.Hard); err != nil { + qa.SendBadRequestError(err) + return + } + + mgr, err := 
quota.NewManager(qa.quota.Reference, qa.quota.ReferenceID) + if err != nil { + qa.SendInternalServerError(fmt.Errorf("failed to create quota manager, error: %v", err)) + return + } + + if err := mgr.UpdateQuota(req.Hard); err != nil { + qa.SendInternalServerError(fmt.Errorf("failed to update hard limits of the quota, error: %v", err)) + return + } +} + +// List returns quotas by query +func (qa *QuotaAPI) List() { + page, size, err := qa.GetPaginationParams() + if err != nil { + qa.SendBadRequestError(err) + return + } + + query := &models.QuotaQuery{ + Reference: qa.GetString("reference"), + ReferenceID: qa.GetString("reference_id"), + Pagination: models.Pagination{ + Page: page, + Size: size, + }, + Sorting: models.Sorting{ + Sort: qa.GetString("sort"), + }, + } + + total, err := dao.GetTotalOfQuotas(query) + if err != nil { + qa.SendInternalServerError(fmt.Errorf("failed to query database for total of quotas, error: %v", err)) + return + } + + quotas, err := dao.ListQuotas(query) + if err != nil { + qa.SendInternalServerError(fmt.Errorf("failed to query database for quotas, error: %v", err)) + return + } + + qa.SetPaginationHeader(total, page, size) + qa.Data["json"] = quotas + qa.ServeJSON() +} diff --git a/src/core/api/quota/chart/chart.go b/src/core/api/quota/chart/chart.go new file mode 100644 index 000000000..f3ebc1f11 --- /dev/null +++ b/src/core/api/quota/chart/chart.go @@ -0,0 +1,226 @@ +// Copyright 2018 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package chart + +import ( + "fmt" + "github.com/goharbor/harbor/src/chartserver" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + common_quota "github.com/goharbor/harbor/src/common/quota" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/api" + quota "github.com/goharbor/harbor/src/core/api/quota" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/promgr" + "github.com/pkg/errors" + "net/url" + "strings" + "sync" +) + +// Migrator ... +type Migrator struct { + pm promgr.ProjectManager +} + +// NewChartMigrator returns a new RegistryMigrator. +func NewChartMigrator(pm promgr.ProjectManager) quota.QuotaMigrator { + migrator := Migrator{ + pm: pm, + } + return &migrator +} + +var ( + controller *chartserver.Controller + controllerErr error + controllerOnce sync.Once +) + +// Ping ... +func (rm *Migrator) Ping() error { + return api.HealthCheckerRegistry["chartmuseum"].Check() +} + +// Dump ... +// Depends on DB to dump chart data, as chart cannot get all of namespaces. 
+func (rm *Migrator) Dump() ([]quota.ProjectInfo, error) { + var ( + projects []quota.ProjectInfo + wg sync.WaitGroup + err error + ) + + all, err := dao.GetProjects(nil) + if err != nil { + return nil, err + } + + wg.Add(len(all)) + errChan := make(chan error, 1) + infoChan := make(chan interface{}) + done := make(chan bool, 1) + + go func() { + defer func() { + done <- true + }() + + for { + select { + case result := <-infoChan: + if result == nil { + return + } + project, ok := result.(quota.ProjectInfo) + if ok { + projects = append(projects, project) + } + + case e := <-errChan: + if err == nil { + err = errors.Wrap(e, "quota sync error on getting info of project") + } else { + err = errors.Wrap(e, err.Error()) + } + } + } + }() + + for _, project := range all { + go func(project *models.Project) { + defer wg.Done() + + var repos []quota.RepoData + ctr, err := chartController() + if err != nil { + errChan <- err + return + } + + chartInfo, err := ctr.ListCharts(project.Name) + if err != nil { + errChan <- err + return + } + + // repo + for _, chart := range chartInfo { + var afs []*models.Artifact + chartVersions, err := ctr.GetChart(project.Name, chart.Name) + if err != nil { + errChan <- err + continue + } + for _, chart := range chartVersions { + af := &models.Artifact{ + PID: project.ProjectID, + Repo: chart.Name, + Tag: chart.Version, + Digest: chart.Digest, + Kind: "Chart", + } + afs = append(afs, af) + } + repoData := quota.RepoData{ + Name: project.Name, + Afs: afs, + } + repos = append(repos, repoData) + } + + projectInfo := quota.ProjectInfo{ + Name: project.Name, + Repos: repos, + } + + infoChan <- projectInfo + }(project) + } + + wg.Wait() + close(infoChan) + + <-done + + if err != nil { + return nil, err + } + + return projects, nil +} + +// Usage ... +// Chart will not cover size. 
+func (rm *Migrator) Usage(projects []quota.ProjectInfo) ([]quota.ProjectUsage, error) { + var pros []quota.ProjectUsage + for _, project := range projects { + var count int64 + // usage count + for _, repo := range project.Repos { + count = count + int64(len(repo.Afs)) + } + proUsage := quota.ProjectUsage{ + Project: project.Name, + Used: common_quota.ResourceList{ + common_quota.ResourceCount: count, + common_quota.ResourceStorage: 0, + }, + } + pros = append(pros, proUsage) + } + return pros, nil + +} + +// Persist ... +// Chart will not persist data into db. +func (rm *Migrator) Persist(projects []quota.ProjectInfo) error { + return nil +} + +func chartController() (*chartserver.Controller, error) { + controllerOnce.Do(func() { + addr, err := config.GetChartMuseumEndpoint() + if err != nil { + controllerErr = fmt.Errorf("failed to get the endpoint URL of chart storage server: %s", err.Error()) + return + } + + addr = strings.TrimSuffix(addr, "/") + url, err := url.Parse(addr) + if err != nil { + controllerErr = errors.New("endpoint URL of chart storage server is malformed") + return + } + + ctr, err := chartserver.NewController(url) + if err != nil { + controllerErr = errors.New("failed to initialize chart API controller") + } + + controller = ctr + + log.Debugf("Chart storage server is set to %s", url.String()) + log.Info("API controller for chart repository server is successfully initialized") + }) + + return controller, controllerErr +} + +func init() { + quota.Register("chart", NewChartMigrator) +} diff --git a/src/core/api/quota/migrator.go b/src/core/api/quota/migrator.go new file mode 100644 index 000000000..bfd2fc164 --- /dev/null +++ b/src/core/api/quota/migrator.go @@ -0,0 +1,173 @@ +// Copyright 2018 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+	"github.com/goharbor/harbor/src/common/dao"
+	"github.com/goharbor/harbor/src/common/models"
+	"github.com/goharbor/harbor/src/common/quota"
+	"github.com/goharbor/harbor/src/common/utils/log"
+	"github.com/goharbor/harbor/src/core/config"
+	"github.com/goharbor/harbor/src/core/promgr"
+	"github.com/goharbor/harbor/src/pkg/types"
+	"strconv"
+)
+
+// QuotaMigrator ...
+type QuotaMigrator interface {
+	// Ping validates and waits for backend service ready.
+	Ping() error
+
+	// Dump exports all data from backend service, registry, chartmuseum
+	Dump() ([]ProjectInfo, error)
+
+	// Usage computes the quota usage of all the projects
+	Usage([]ProjectInfo) ([]ProjectUsage, error)
+
+	// Persist records the data to DB, artifact, artifact_blob and blob tables.
+	Persist([]ProjectInfo) error
+}
+
+// ProjectInfo ...
+type ProjectInfo struct {
+	Name  string
+	Repos []RepoData
+}
+
+// RepoData ...
+type RepoData struct {
+	Name  string
+	Afs   []*models.Artifact
+	Afnbs []*models.ArtifactAndBlob
+	Blobs []*models.Blob
+}
+
+// ProjectUsage ...
+type ProjectUsage struct {
+	Project string
+	Used    quota.ResourceList
+}
+
+// Instance ...
+type Instance func(promgr.ProjectManager) QuotaMigrator
+
+var adapters = make(map[string]Instance)
+
+// Register ...
+func Register(name string, adapter Instance) {
+	if adapter == nil {
+		panic("quota: Register adapter is nil")
+	}
+	if _, ok := adapters[name]; ok {
+		panic("quota: Register called twice for adapter " + name)
+	}
+	adapters[name] = adapter
+}
+
+// Sync ...
+func Sync(pm promgr.ProjectManager, populate bool) error {
+	totalUsage := make(map[string][]ProjectUsage)
+	for name, instanceFunc := range adapters {
+		if !config.WithChartMuseum() {
+			if name == "chart" {
+				continue
+			}
+		}
+		adapter := instanceFunc(pm)
+		if err := adapter.Ping(); err != nil {
+			return err
+		}
+		data, err := adapter.Dump()
+		if err != nil {
+			return err
+		}
+		usage, err := adapter.Usage(data)
+		if err != nil {
+			return err
+		}
+		totalUsage[name] = usage
+		if populate {
+			if err := adapter.Persist(data); err != nil {
+				return err
+			}
+		}
+	}
+	merged := mergeUsage(totalUsage)
+	if err := ensureQuota(merged); err != nil {
+		return err
+	}
+	return nil
+}
+
+// mergeUsage merges the usage of adapters
+func mergeUsage(total map[string][]ProjectUsage) []ProjectUsage {
+	if !config.WithChartMuseum() {
+		return total["registry"]
+	}
+	regUsgs := total["registry"]
+	chartUsgs := total["chart"]
+
+	var mergedUsage []ProjectUsage
+	temp := make(map[string]quota.ResourceList)
+
+	for _, regUsg := range regUsgs {
+		_, exist := temp[regUsg.Project]
+		if !exist {
+			temp[regUsg.Project] = regUsg.Used
+			mergedUsage = append(mergedUsage, ProjectUsage{
+				Project: regUsg.Project,
+				Used:    regUsg.Used,
+			})
+		}
+	}
+	for _, chartUsg := range chartUsgs {
+		var usedTemp quota.ResourceList
+		_, exist := temp[chartUsg.Project]
+		if !exist {
+			usedTemp = chartUsg.Used
+		} else {
+			usedTemp = types.Add(temp[chartUsg.Project], chartUsg.Used)
+		}
+		temp[chartUsg.Project] = usedTemp
+		mergedUsage = append(mergedUsage, ProjectUsage{
+			Project: chartUsg.Project,
+			Used:    usedTemp,
+		})
+	}
+	return mergedUsage
+}
+
+// ensureQuota updates the quota and quota usage in the database.
+func ensureQuota(usages []ProjectUsage) error { + var pid int64 + for _, usage := range usages { + project, err := dao.GetProjectByName(usage.Project) + if err != nil { + log.Error(err) + return err + } + pid = project.ProjectID + quotaMgr, err := quota.NewManager("project", strconv.FormatInt(pid, 10)) + if err != nil { + log.Errorf("Error occurred when to new quota manager %v", err) + return err + } + if err := quotaMgr.EnsureQuota(usage.Used); err != nil { + log.Errorf("cannot ensure quota for the project: %d, err: %v", pid, err) + return err + } + } + return nil +} diff --git a/src/core/api/quota/registry/registry.go b/src/core/api/quota/registry/registry.go new file mode 100644 index 000000000..18a7f87c0 --- /dev/null +++ b/src/core/api/quota/registry/registry.go @@ -0,0 +1,433 @@ +// Copyright 2018 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package registry + +import ( + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + common_quota "github.com/goharbor/harbor/src/common/quota" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/common/utils/registry" + "github.com/goharbor/harbor/src/core/api" + quota "github.com/goharbor/harbor/src/core/api/quota" + "github.com/goharbor/harbor/src/core/promgr" + coreutils "github.com/goharbor/harbor/src/core/utils" + "github.com/pkg/errors" + "strings" + "sync" + "time" +) + +// Migrator ... +type Migrator struct { + pm promgr.ProjectManager +} + +// NewRegistryMigrator returns a new Migrator. +func NewRegistryMigrator(pm promgr.ProjectManager) quota.QuotaMigrator { + migrator := Migrator{ + pm: pm, + } + return &migrator +} + +// Ping ... +func (rm *Migrator) Ping() error { + return api.HealthCheckerRegistry["registry"].Check() +} + +// Dump ... 
+func (rm *Migrator) Dump() ([]quota.ProjectInfo, error) { + var ( + projects []quota.ProjectInfo + wg sync.WaitGroup + err error + ) + + reposInRegistry, err := api.Catalog() + if err != nil { + return nil, err + } + + // repoMap : map[project_name : []repo list] + repoMap := make(map[string][]string) + for _, item := range reposInRegistry { + projectName := strings.Split(item, "/")[0] + pro, err := rm.pm.Get(projectName) + if err != nil { + log.Errorf("failed to get project %s: %v", projectName, err) + continue + } + _, exist := repoMap[pro.Name] + if !exist { + repoMap[pro.Name] = []string{item} + } else { + repos := repoMap[pro.Name] + repos = append(repos, item) + repoMap[pro.Name] = repos + } + } + + wg.Add(len(repoMap)) + errChan := make(chan error, 1) + infoChan := make(chan interface{}) + done := make(chan bool, 1) + + go func() { + defer func() { + done <- true + }() + + for { + select { + case result := <-infoChan: + if result == nil { + return + } + project, ok := result.(quota.ProjectInfo) + if ok { + projects = append(projects, project) + } + + case e := <-errChan: + if err == nil { + err = errors.Wrap(e, "quota sync error on getting info of project") + } else { + err = errors.Wrap(e, err.Error()) + } + } + } + }() + + for project, repos := range repoMap { + go func(project string, repos []string) { + defer wg.Done() + info, err := infoOfProject(project, repos) + if err != nil { + errChan <- err + return + } + infoChan <- info + }(project, repos) + } + + wg.Wait() + close(infoChan) + + // wait for all of project info + <-done + + if err != nil { + return nil, err + } + + return projects, nil +} + +// Usage ... +// registry needs to merge the shard blobs of different repositories. 
+func (rm *Migrator) Usage(projects []quota.ProjectInfo) ([]quota.ProjectUsage, error) { + var pros []quota.ProjectUsage + + for _, project := range projects { + var size, count int64 + var blobs = make(map[string]int64) + + // usage count + for _, repo := range project.Repos { + count = count + int64(len(repo.Afs)) + // Because that there are some shared blobs between repositories, it needs to remove the duplicate items. + for _, blob := range repo.Blobs { + _, exist := blobs[blob.Digest] + if !exist { + blobs[blob.Digest] = blob.Size + } + } + } + // size + for _, item := range blobs { + size = size + item + } + + proUsage := quota.ProjectUsage{ + Project: project.Name, + Used: common_quota.ResourceList{ + common_quota.ResourceCount: count, + common_quota.ResourceStorage: size, + }, + } + pros = append(pros, proUsage) + } + + return pros, nil +} + +// Persist ... +func (rm *Migrator) Persist(projects []quota.ProjectInfo) error { + for _, project := range projects { + for _, repo := range project.Repos { + if err := persistAf(repo.Afs); err != nil { + return err + } + if err := persistAfnbs(repo.Afnbs); err != nil { + return err + } + if err := persistBlob(repo.Blobs); err != nil { + return err + } + } + } + if err := persistPB(projects); err != nil { + return err + } + return nil +} + +func persistAf(afs []*models.Artifact) error { + if len(afs) != 0 { + for _, af := range afs { + _, err := dao.AddArtifact(af) + if err != nil { + if err == dao.ErrDupRows { + continue + } + log.Error(err) + return err + } + } + } + return nil +} + +func persistAfnbs(afnbs []*models.ArtifactAndBlob) error { + if len(afnbs) != 0 { + for _, afnb := range afnbs { + _, err := dao.AddArtifactNBlob(afnb) + if err != nil { + if err == dao.ErrDupRows { + continue + } + log.Error(err) + return err + } + } + } + return nil +} + +func persistBlob(blobs []*models.Blob) error { + if len(blobs) != 0 { + for _, blob := range blobs { + _, err := dao.AddBlob(blob) + if err != nil { + if err == 
dao.ErrDupRows { + continue + } + log.Error(err) + return err + } + } + } + return nil +} + +func persistPB(projects []quota.ProjectInfo) error { + for _, project := range projects { + var blobs = make(map[string]int64) + var blobsOfPro []*models.Blob + for _, repo := range project.Repos { + for _, blob := range repo.Blobs { + _, exist := blobs[blob.Digest] + if exist { + continue + } + blobs[blob.Digest] = blob.Size + blobInDB, err := dao.GetBlob(blob.Digest) + if err != nil { + log.Error(err) + return err + } + if blobInDB != nil { + blobsOfPro = append(blobsOfPro, blobInDB) + } + } + } + pro, err := dao.GetProjectByName(project.Name) + if err != nil { + log.Error(err) + return err + } + _, err = dao.AddBlobsToProject(pro.ProjectID, blobsOfPro...) + if err != nil { + log.Error(err) + return err + } + } + return nil +} + +func infoOfProject(project string, repoList []string) (quota.ProjectInfo, error) { + var ( + repos []quota.RepoData + wg sync.WaitGroup + err error + ) + wg.Add(len(repoList)) + + errChan := make(chan error, 1) + infoChan := make(chan interface{}) + done := make(chan bool, 1) + + pro, err := dao.GetProjectByName(project) + if err != nil { + log.Error(err) + return quota.ProjectInfo{}, err + } + + go func() { + defer func() { + done <- true + }() + + for { + select { + case result := <-infoChan: + if result == nil { + return + } + repoData, ok := result.(quota.RepoData) + if ok { + repos = append(repos, repoData) + } + + case e := <-errChan: + if err == nil { + err = errors.Wrap(e, "quota sync error on getting info of repo") + } else { + err = errors.Wrap(e, err.Error()) + } + } + } + }() + + for _, repo := range repoList { + go func(pid int64, repo string) { + defer func() { + wg.Done() + }() + info, err := infoOfRepo(pid, repo) + if err != nil { + errChan <- err + return + } + infoChan <- info + }(pro.ProjectID, repo) + } + + wg.Wait() + close(infoChan) + + <-done + + if err != nil { + return quota.ProjectInfo{}, err + } + + return 
quota.ProjectInfo{ + Name: project, + Repos: repos, + }, nil +} + +func infoOfRepo(pid int64, repo string) (quota.RepoData, error) { + repoClient, err := coreutils.NewRepositoryClientForUI("harbor-core", repo) + if err != nil { + return quota.RepoData{}, err + } + tags, err := repoClient.ListTag() + if err != nil { + return quota.RepoData{}, err + } + var afnbs []*models.ArtifactAndBlob + var afs []*models.Artifact + var blobs []*models.Blob + + for _, tag := range tags { + _, mediaType, payload, err := repoClient.PullManifest(tag, []string{ + schema1.MediaTypeManifest, + schema1.MediaTypeSignedManifest, + schema2.MediaTypeManifest, + }) + if err != nil { + log.Error(err) + return quota.RepoData{}, err + } + manifest, desc, err := registry.UnMarshal(mediaType, payload) + if err != nil { + log.Error(err) + return quota.RepoData{}, err + } + // self + afnb := &models.ArtifactAndBlob{ + DigestAF: desc.Digest.String(), + DigestBlob: desc.Digest.String(), + } + afnbs = append(afnbs, afnb) + // add manifest as a blob. 
+ blob := &models.Blob{ + Digest: desc.Digest.String(), + ContentType: desc.MediaType, + Size: desc.Size, + CreationTime: time.Now(), + } + blobs = append(blobs, blob) + for _, layer := range manifest.References() { + afnb := &models.ArtifactAndBlob{ + DigestAF: desc.Digest.String(), + DigestBlob: layer.Digest.String(), + } + afnbs = append(afnbs, afnb) + blob := &models.Blob{ + Digest: layer.Digest.String(), + ContentType: layer.MediaType, + Size: layer.Size, + CreationTime: time.Now(), + } + blobs = append(blobs, blob) + } + af := &models.Artifact{ + PID: pid, + Repo: strings.Split(repo, "/")[1], + Tag: tag, + Digest: desc.Digest.String(), + Kind: "Docker-Image", + CreationTime: time.Now(), + } + afs = append(afs, af) + } + return quota.RepoData{ + Name: repo, + Afs: afs, + Afnbs: afnbs, + Blobs: blobs, + }, nil +} + +func init() { + quota.Register("registry", NewRegistryMigrator) +} diff --git a/src/core/api/quota_test.go b/src/core/api/quota_test.go new file mode 100644 index 000000000..ddda51457 --- /dev/null +++ b/src/core/api/quota_test.go @@ -0,0 +1,133 @@ +// Copyright 2018 Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "fmt" + "testing" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota" + "github.com/goharbor/harbor/src/common/quota/driver" + "github.com/goharbor/harbor/src/common/quota/driver/mocks" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/goharbor/harbor/src/testing/apitests/apilib" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var ( + reference = "mock" + hardLimits = types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1} +) + +func init() { + mockDriver := &mocks.Driver{} + + mockHardLimitsFn := func() types.ResourceList { + return hardLimits + } + + mockLoadFn := func(key string) driver.RefObject { + return driver.RefObject{"id": key} + } + + mockValidateFn := func(hardLimits types.ResourceList) error { + if len(hardLimits) == 0 { + return fmt.Errorf("no resources found") + } + + return nil + } + + mockDriver.On("HardLimits").Return(mockHardLimitsFn) + mockDriver.On("Load", mock.AnythingOfType("string")).Return(mockLoadFn, nil) + mockDriver.On("Validate", mock.AnythingOfType("types.ResourceList")).Return(mockValidateFn) + + driver.Register(reference, mockDriver) +} + +func TestQuotaAPIList(t *testing.T) { + assert := assert.New(t) + apiTest := newHarborAPI() + + count := 10 + for i := 0; i < count; i++ { + mgr, err := quota.NewManager(reference, fmt.Sprintf("%d", i)) + assert.Nil(err) + + _, err = mgr.NewQuota(hardLimits) + assert.Nil(err) + } + + code, quotas, err := apiTest.QuotasGet(&apilib.QuotaQuery{Reference: reference}, *admin) + assert.Nil(err) + assert.Equal(int(200), code) + assert.Len(quotas, count, fmt.Sprintf("quotas len should be %d", count)) + + code, quotas, err = apiTest.QuotasGet(&apilib.QuotaQuery{Reference: reference, PageSize: 1}, *admin) + assert.Nil(err) + assert.Equal(int(200), code) + assert.Len(quotas, 1) +} + +func TestQuotaAPIGet(t *testing.T) { + assert := assert.New(t) + apiTest := newHarborAPI() + + 
mgr, err := quota.NewManager(reference, "quota-get") + assert.Nil(err) + + quotaID, err := mgr.NewQuota(hardLimits) + assert.Nil(err) + + code, quota, err := apiTest.QuotasGetByID(*admin, fmt.Sprintf("%d", quotaID)) + assert.Nil(err) + assert.Equal(int(200), code) + assert.Equal(map[string]int64{"storage": -1, "count": -1}, quota.Hard) + + code, _, err = apiTest.QuotasGetByID(*admin, "100") + assert.Nil(err) + assert.Equal(int(404), code) +} + +func TestQuotaPut(t *testing.T) { + assert := assert.New(t) + apiTest := newHarborAPI() + + mgr, err := quota.NewManager(reference, "quota-put") + assert.Nil(err) + + quotaID, err := mgr.NewQuota(hardLimits) + assert.Nil(err) + + code, quota, err := apiTest.QuotasGetByID(*admin, fmt.Sprintf("%d", quotaID)) + assert.Nil(err) + assert.Equal(int(200), code) + assert.Equal(map[string]int64{"count": -1, "storage": -1}, quota.Hard) + + code, err = apiTest.QuotasPut(*admin, fmt.Sprintf("%d", quotaID), models.QuotaUpdateRequest{}) + assert.Nil(err, err) + assert.Equal(int(400), code) + + code, err = apiTest.QuotasPut(*admin, fmt.Sprintf("%d", quotaID), models.QuotaUpdateRequest{Hard: types.ResourceList{types.ResourceCount: 100, types.ResourceStorage: 100}}) + assert.Nil(err) + assert.Equal(int(200), code) + + code, quota, err = apiTest.QuotasGetByID(*admin, fmt.Sprintf("%d", quotaID)) + assert.Nil(err) + assert.Equal(int(200), code) + assert.Equal(map[string]int64{"count": 100, "storage": 100}, quota.Hard) +} diff --git a/src/core/api/reg_gc_test.go b/src/core/api/reg_gc_test.go index a8e13891c..f9a81601f 100644 --- a/src/core/api/reg_gc_test.go +++ b/src/core/api/reg_gc_test.go @@ -3,7 +3,7 @@ package api import ( "testing" - "github.com/goharbor/harbor/tests/apitests/apilib" + "github.com/goharbor/harbor/src/testing/apitests/apilib" "github.com/stretchr/testify/assert" ) diff --git a/src/core/api/registry.go b/src/core/api/registry.go index 452d50e72..3885e11a0 100644 --- a/src/core/api/registry.go +++ b/src/core/api/registry.go 
@@ -49,6 +49,7 @@ func (t *RegistryAPI) Ping() { ID *int64 `json:"id"` Type *string `json:"type"` URL *string `json:"url"` + Region *string `json:"region"` CredentialType *string `json:"credential_type"` AccessKey *string `json:"access_key"` AccessSecret *string `json:"access_secret"` diff --git a/src/core/api/repository.go b/src/core/api/repository.go old mode 100644 new mode 100755 index 12659b48c..46c6976f9 --- a/src/core/api/repository.go +++ b/src/core/api/repository.go @@ -16,6 +16,7 @@ package api import ( "encoding/json" + "errors" "fmt" "io/ioutil" "net/http" @@ -24,7 +25,6 @@ import ( "strings" "time" - "errors" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/goharbor/harbor/src/common" @@ -33,12 +33,14 @@ import ( "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/common/utils" - "github.com/goharbor/harbor/src/common/utils/clair" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/common/utils/notary" + notarymodel "github.com/goharbor/harbor/src/common/utils/notary/model" "github.com/goharbor/harbor/src/common/utils/registry" "github.com/goharbor/harbor/src/core/config" + notifierEvt "github.com/goharbor/harbor/src/core/notifier/event" coreutils "github.com/goharbor/harbor/src/core/utils" + "github.com/goharbor/harbor/src/pkg/scan" "github.com/goharbor/harbor/src/replication" "github.com/goharbor/harbor/src/replication/event" "github.com/goharbor/harbor/src/replication/model" @@ -78,30 +80,6 @@ func (r reposSorter) Less(i, j int) bool { return r[i].Index < r[j].Index } -type tagDetail struct { - Digest string `json:"digest"` - Name string `json:"name"` - Size int64 `json:"size"` - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version"` - DockerVersion string `json:"docker_version"` - Author string `json:"author"` - Created time.Time 
`json:"created"` - Config *cfg `json:"config"` -} - -type cfg struct { - Labels map[string]string `json:"labels"` -} - -type tagResp struct { - tagDetail - Signature *notary.Target `json:"signature"` - ScanOverview *models.ImgScanOverview `json:"scan_overview,omitempty"` - Labels []*models.Label `json:"labels"` -} - type manifestResp struct { Manifest interface{} `json:"manifest"` Config interface{} `json:"config,omitempty" ` @@ -261,7 +239,7 @@ func (ra *RepositoryAPI) Delete() { return } - rc, err := coreutils.NewRepositoryClientForUI(ra.SecurityCtx.GetUsername(), repoName) + rc, err := coreutils.NewRepositoryClientForLocal(ra.SecurityCtx.GetUsername(), repoName) if err != nil { log.Errorf("error occurred while initializing repository client for %s: %v", repoName, err) ra.SendInternalServerError(errors.New("internal error")) @@ -331,7 +309,7 @@ func (ra *RepositoryAPI) Delete() { go func(tag string) { e := &event.Event{ - Type: event.EventTypeImagePush, + Type: event.EventTypeImageDelete, Resource: &model.Resource{ Type: model.ResourceTypeImage, Metadata: &model.ResourceMetadata{ @@ -362,6 +340,24 @@ func (ra *RepositoryAPI) Delete() { }(t) } + // build and publish image delete event + evt := ¬ifierEvt.Event{} + imgDelMetadata := ¬ifierEvt.ImageDelMetaData{ + Project: project, + Tags: tags, + RepoName: repoName, + OccurAt: time.Now(), + Operator: ra.SecurityCtx.GetUsername(), + } + if err := evt.Build(imgDelMetadata); err != nil { + // do not return when building event metadata failed + log.Errorf("failed to build image delete event metadata: %v", err) + } + if err := evt.Publish(); err != nil { + // do not return when publishing event failed + log.Errorf("failed to publish image delete event: %v", err) + } + exist, err := repositoryExist(repoName, rc) if err != nil { log.Errorf("failed to check the existence of repository %s: %v", repoName, err) @@ -588,7 +584,12 @@ func (ra *RepositoryAPI) GetTags() { } labeledTags := map[string]struct{}{} for _, rl := range 
rls { - labeledTags[strings.Split(rl.ResourceName, ":")[1]] = struct{}{} + strs := strings.SplitN(rl.ResourceName, ":", 2) + // the "rls" may contain images which don't belong to the repository + if strs[0] != repoName { + continue + } + labeledTags[strs[1]] = struct{}{} } ts := []string{} for _, tag := range tags { @@ -599,32 +600,52 @@ func (ra *RepositoryAPI) GetTags() { tags = ts } + detail, err := ra.GetBool("detail", true) + if !detail && err == nil { + ra.Data["json"] = simpleTags(tags) + ra.ServeJSON() + return + } + ra.Data["json"] = assembleTagsInParallel(client, repoName, tags, ra.SecurityCtx.GetUsername()) ra.ServeJSON() } +func simpleTags(tags []string) []*models.TagResp { + var tagsResp []*models.TagResp + for _, tag := range tags { + tagsResp = append(tagsResp, &models.TagResp{ + TagDetail: models.TagDetail{ + Name: tag, + }, + }) + } + + return tagsResp +} + // get config, signature and scan overview and assemble them into one // struct for each tag in tags func assembleTagsInParallel(client *registry.Repository, repository string, - tags []string, username string) []*tagResp { + tags []string, username string) []*models.TagResp { var err error - signatures := map[string][]notary.Target{} + signatures := map[string][]notarymodel.Target{} if config.WithNotary() { signatures, err = getSignatures(username, repository) if err != nil { - signatures = map[string][]notary.Target{} + signatures = map[string][]notarymodel.Target{} log.Errorf("failed to get signatures of %s: %v", repository, err) } } - c := make(chan *tagResp) + c := make(chan *models.TagResp) for _, tag := range tags { go assembleTag(c, client, repository, tag, config.WithClair(), config.WithNotary(), signatures) } - result := []*tagResp{} - var item *tagResp + result := []*models.TagResp{} + var item *models.TagResp for i := 0; i < len(tags); i++ { item = <-c if item == nil { @@ -635,10 +656,10 @@ func assembleTagsInParallel(client *registry.Repository, repository string, return result } 
-func assembleTag(c chan *tagResp, client *registry.Repository, +func assembleTag(c chan *models.TagResp, client *registry.Repository, repository, tag string, clairEnabled, notaryEnabled bool, - signatures map[string][]notary.Target) { - item := &tagResp{} + signatures map[string][]notarymodel.Target) { + item := &models.TagResp{} // labels image := fmt.Sprintf("%s:%s", repository, tag) labels, err := dao.GetLabelsOfResource(common.ResourceTypeImage, image) @@ -654,7 +675,7 @@ func assembleTag(c chan *tagResp, client *registry.Repository, log.Errorf("failed to get v2 manifest of %s:%s: %v", repository, tag, err) } if tagDetail != nil { - item.tagDetail = *tagDetail + item.TagDetail = *tagDetail } // scan overview @@ -672,24 +693,41 @@ func assembleTag(c chan *tagResp, client *registry.Repository, } } } + + // pull/push time + artifact, err := dao.GetArtifact(repository, tag) + if err != nil { + log.Errorf("failed to get artifact %s:%s: %v", repository, tag, err) + } else { + if artifact == nil { + log.Warningf("artifact %s:%s not found", repository, tag) + } else { + item.PullTime = artifact.PullTime + item.PushTime = artifact.PushTime + } + } + c <- item } // getTagDetail returns the detail information for v2 manifest image // The information contains architecture, os, author, size, etc. 
-func getTagDetail(client *registry.Repository, tag string) (*tagDetail, error) { - detail := &tagDetail{ +func getTagDetail(client *registry.Repository, tag string) (*models.TagDetail, error) { + detail := &models.TagDetail{ Name: tag, } - digest, _, payload, err := client.PullManifest(tag, []string{schema2.MediaTypeManifest}) + digest, mediaType, payload, err := client.PullManifest(tag, []string{schema2.MediaTypeManifest}) if err != nil { return detail, err } detail.Digest = digest - manifest := &schema2.DeserializedManifest{} - if err = manifest.UnmarshalJSON(payload); err != nil { + if strings.Contains(mediaType, "application/json") { + mediaType = schema1.MediaTypeManifest + } + manifest, _, err := registry.UnMarshal(mediaType, payload) + if err != nil { return detail, err } @@ -699,7 +737,21 @@ func getTagDetail(client *registry.Repository, tag string) (*tagDetail, error) { detail.Size += ref.Size } - _, reader, err := client.PullBlob(manifest.Target().Digest.String()) + // if the media type of the manifest isn't v2, doesn't parse image config + // and return directly + // this impacts that some detail information(os, arch, ...) 
of old images + // cannot be got + if mediaType != schema2.MediaTypeManifest { + log.Debugf("the media type of the manifest is %s, not v2, skip", mediaType) + return detail, nil + } + v2Manifest, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + log.Debug("the manifest cannot be convert to DeserializedManifest, skip") + return detail, nil + } + + _, reader, err := client.PullBlob(v2Manifest.Target().Digest.String()) if err != nil { return detail, err } @@ -718,7 +770,7 @@ func getTagDetail(client *registry.Repository, tag string) (*tagDetail, error) { return detail, nil } -func populateAuthor(detail *tagDetail) { +func populateAuthor(detail *models.TagDetail) { // has author info already if len(detail.Author) > 0 { return @@ -1018,34 +1070,22 @@ func (ra *RepositoryAPI) VulnerabilityDetails() { ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername())) return } - res := []*models.VulnerabilityItem{} - overview, err := dao.GetImgScanOverview(digest) + res, err := scan.VulnListByDigest(digest) if err != nil { - ra.SendInternalServerError(fmt.Errorf("failed to get the scan overview, error: %v", err)) - return - } - if overview != nil && len(overview.DetailsKey) > 0 { - clairClient := clair.NewClient(config.ClairEndpoint(), nil) - log.Debugf("The key for getting details: %s", overview.DetailsKey) - details, err := clairClient.GetResult(overview.DetailsKey) - if err != nil { - ra.SendInternalServerError(fmt.Errorf("Failed to get scan details from Clair, error: %v", err)) - return - } - res = transformVulnerabilities(details) + log.Errorf("Failed to get vulnerability list for image: %s:%s", repository, tag) } ra.Data["json"] = res ra.ServeJSON() } -func getSignatures(username, repository string) (map[string][]notary.Target, error) { +func getSignatures(username, repository string) (map[string][]notarymodel.Target, error) { targets, err := notary.GetInternalTargets(config.InternalNotaryEndpoint(), username, repository) if err != nil { return nil, err } - 
signatures := map[string][]notary.Target{} + signatures := map[string][]notarymodel.Target{} for _, tgt := range targets { digest, err := notary.DigestFromTarget(tgt) if err != nil { diff --git a/src/core/api/repository_label_test.go b/src/core/api/repository_label_test.go index 02797bd12..3ab7e13f3 100644 --- a/src/core/api/repository_label_test.go +++ b/src/core/api/repository_label_test.go @@ -28,7 +28,7 @@ import ( var ( resourceLabelAPIBasePath = "/api/repositories" - repository = "library/hello-world" + repo = "library/hello-world" tag = "latest" proLibraryLabelID int64 ) @@ -63,7 +63,7 @@ func TestAddToImage(t *testing.T) { { request: &testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, - repository, tag), + repo, tag), method: http.MethodPost, }, code: http.StatusUnauthorized, @@ -72,13 +72,13 @@ func TestAddToImage(t *testing.T) { { request: &testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, - repository, tag), + repo, tag), method: http.MethodPost, credential: projGuest, }, code: http.StatusForbidden, }, - // 404 repository doesn't exist + // 404 repo doesn't exist { request: &testingRequest{ url: fmt.Sprintf("%s/library/non-exist-repo/tags/%s/labels", resourceLabelAPIBasePath, tag), @@ -90,7 +90,7 @@ func TestAddToImage(t *testing.T) { // 404 image doesn't exist { request: &testingRequest{ - url: fmt.Sprintf("%s/%s/tags/non-exist-tag/labels", resourceLabelAPIBasePath, repository), + url: fmt.Sprintf("%s/%s/tags/non-exist-tag/labels", resourceLabelAPIBasePath, repo), method: http.MethodPost, credential: projDeveloper, }, @@ -99,7 +99,7 @@ func TestAddToImage(t *testing.T) { // 400 { request: &testingRequest{ - url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repository, tag), + url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repo, tag), method: http.MethodPost, credential: projDeveloper, }, @@ -109,7 +109,7 @@ func TestAddToImage(t *testing.T) { { request: 
&testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, - repository, tag), + repo, tag), method: http.MethodPost, credential: projDeveloper, bodyJSON: struct { @@ -124,7 +124,7 @@ func TestAddToImage(t *testing.T) { { request: &testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, - repository, tag), + repo, tag), method: http.MethodPost, credential: projDeveloper, bodyJSON: struct { @@ -139,7 +139,7 @@ func TestAddToImage(t *testing.T) { { request: &testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, - repository, tag), + repo, tag), method: http.MethodPost, credential: projDeveloper, bodyJSON: struct { @@ -154,7 +154,7 @@ func TestAddToImage(t *testing.T) { { request: &testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, - repository, tag), + repo, tag), method: http.MethodPost, credential: projDeveloper, bodyJSON: struct { @@ -172,7 +172,7 @@ func TestAddToImage(t *testing.T) { func TestGetOfImage(t *testing.T) { labels := []*models.Label{} err := handleAndParse(&testingRequest{ - url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repository, tag), + url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repo, tag), method: http.MethodGet, credential: projDeveloper, }, &labels) @@ -185,7 +185,7 @@ func TestRemoveFromImage(t *testing.T) { runCodeCheckingCases(t, &codeCheckingCase{ request: &testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels/%d", resourceLabelAPIBasePath, - repository, tag, proLibraryLabelID), + repo, tag, proLibraryLabelID), method: http.MethodDelete, credential: projDeveloper, }, @@ -195,7 +195,7 @@ func TestRemoveFromImage(t *testing.T) { labels := []*models.Label{} err := handleAndParse(&testingRequest{ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, - repository, tag), + repo, tag), method: http.MethodGet, credential: projDeveloper, }, &labels) @@ -206,7 +206,7 @@ func 
TestRemoveFromImage(t *testing.T) { func TestAddToRepository(t *testing.T) { runCodeCheckingCases(t, &codeCheckingCase{ request: &testingRequest{ - url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repository), + url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repo), method: http.MethodPost, bodyJSON: struct { ID int64 @@ -222,7 +222,7 @@ func TestAddToRepository(t *testing.T) { func TestGetOfRepository(t *testing.T) { labels := []*models.Label{} err := handleAndParse(&testingRequest{ - url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repository), + url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repo), method: http.MethodGet, credential: projDeveloper, }, &labels) @@ -235,7 +235,7 @@ func TestRemoveFromRepository(t *testing.T) { runCodeCheckingCases(t, &codeCheckingCase{ request: &testingRequest{ url: fmt.Sprintf("%s/%s/labels/%d", resourceLabelAPIBasePath, - repository, proLibraryLabelID), + repo, proLibraryLabelID), method: http.MethodDelete, credential: projDeveloper, }, @@ -244,7 +244,7 @@ func TestRemoveFromRepository(t *testing.T) { labels := []*models.Label{} err := handleAndParse(&testingRequest{ - url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repository), + url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repo), method: http.MethodGet, credential: projDeveloper, }, &labels) diff --git a/src/core/api/repository_test.go b/src/core/api/repository_test.go index 34649d245..7aa17a0b2 100644 --- a/src/core/api/repository_test.go +++ b/src/core/api/repository_test.go @@ -21,7 +21,7 @@ import ( "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/dao/project" "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/tests/apitests/apilib" + "github.com/goharbor/harbor/src/testing/apitests/apilib" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -96,7 +96,7 @@ func TestGetReposTags(t *testing.T) { t.Errorf("failed to get 
tags of repository %s: %v", repository, err) } else { assert.Equal(int(200), code, "httpStatusCode should be 200") - if tg, ok := tags.([]tagResp); ok { + if tg, ok := tags.([]models.TagResp); ok { assert.Equal(1, len(tg), fmt.Sprintf("there should be only one tag, but now %v", tg)) assert.Equal(tg[0].Name, "latest", "the tag should be latest") } else { @@ -207,19 +207,19 @@ func TestGetReposTop(t *testing.T) { func TestPopulateAuthor(t *testing.T) { author := "author" - detail := &tagDetail{ + detail := &models.TagDetail{ Author: author, } populateAuthor(detail) assert.Equal(t, author, detail.Author) - detail = &tagDetail{} + detail = &models.TagDetail{} populateAuthor(detail) assert.Equal(t, "", detail.Author) maintainer := "maintainer" - detail = &tagDetail{ - Config: &cfg{ + detail = &models.TagDetail{ + Config: &models.TagCfg{ Labels: map[string]string{ "Maintainer": maintainer, }, diff --git a/src/core/api/retention.go b/src/core/api/retention.go new file mode 100644 index 000000000..07bc013b2 --- /dev/null +++ b/src/core/api/retention.go @@ -0,0 +1,437 @@ +package api + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" + + "github.com/goharbor/harbor/src/common/rbac" + "github.com/goharbor/harbor/src/core/filter" + "github.com/goharbor/harbor/src/core/promgr" + "github.com/goharbor/harbor/src/pkg/retention" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/q" +) + +// RetentionAPI ... 
+type RetentionAPI struct { + BaseController + pm promgr.ProjectManager +} + +// Prepare validates the user +func (r *RetentionAPI) Prepare() { + r.BaseController.Prepare() + if !r.SecurityCtx.IsAuthenticated() { + r.SendUnAuthorizedError(errors.New("UnAuthorized")) + return + } + pm, e := filter.GetProjectManager(r.Ctx.Request) + if e != nil { + r.SendInternalServerError(e) + return + } + r.pm = pm + +} + +// GetMetadatas Get Metadatas +func (r *RetentionAPI) GetMetadatas() { + data := ` +{ + "templates": [ + { + "rule_template": "latestPushedK", + "display_text": "the most recently pushed # images", + "action": "retain", + "params": [ + { + "type": "int", + "unit": "COUNT", + "required": true + } + ] + }, + { + "rule_template": "latestPulledN", + "display_text": "the most recently pulled # images", + "action": "retain", + "params": [ + { + "type": "int", + "unit": "COUNT", + "required": true + } + ] + }, + { + "rule_template": "nDaysSinceLastPull", + "display_text": "pulled within the last # days", + "action": "retain", + "params": [ + { + "type": "int", + "unit": "DAYS", + "required": true + } + ] + }, + { + "rule_template": "nDaysSinceLastPush", + "display_text": "pushed within the last # days", + "action": "retain", + "params": [ + { + "type": "int", + "unit": "DAYS", + "required": true + } + ] + }, + { + "rule_template": "nothing", + "display_text": "none", + "action": "retain", + "params": [] + }, + { + "rule_template": "always", + "display_text": "always", + "action": "retain", + "params": [] + } + ], + "scope_selectors": [ + { + "display_text": "Repositories", + "kind": "doublestar", + "decorations": [ + "repoMatches", + "repoExcludes" + ] + } + ], + "tag_selectors": [ + { + "display_text": "Labels", + "kind": "label", + "decorations": [ + "withLabels", + "withoutLabels" + ] + }, + { + "display_text": "Tags", + "kind": "doublestar", + "decorations": [ + "matches", + "excludes" + ] + } + ] +} +` + w := r.Ctx.ResponseWriter + w.Header().Set("Content-Type", 
"application/json; charset=utf-8") + w.WriteHeader(http.StatusOK) + w.Write([]byte(data)) +} + +// GetRetention Get Retention +func (r *RetentionAPI) GetRetention() { + id, err := r.GetIDFromURL() + if err != nil { + r.SendBadRequestError(err) + return + } + p, err := retentionController.GetRetention(id) + if err != nil { + r.SendBadRequestError(err) + return + } + if !r.requireAccess(p, rbac.ActionRead) { + return + } + r.WriteJSONData(p) +} + +// CreateRetention Create Retention +func (r *RetentionAPI) CreateRetention() { + p := &policy.Metadata{} + isValid, err := r.DecodeJSONReqAndValidate(p) + if !isValid { + r.SendBadRequestError(err) + return + } + if err = r.checkRuleConflict(p); err != nil { + r.SendConflictError(err) + return + } + if !r.requireAccess(p, rbac.ActionCreate) { + return + } + switch p.Scope.Level { + case policy.ScopeLevelProject: + if p.Scope.Reference <= 0 { + r.SendBadRequestError(fmt.Errorf("invalid Project id %d", p.Scope.Reference)) + return + } + + proj, err := r.pm.Get(p.Scope.Reference) + if err != nil { + r.SendBadRequestError(err) + } + if proj == nil { + r.SendBadRequestError(fmt.Errorf("invalid Project id %d", p.Scope.Reference)) + } + default: + r.SendBadRequestError(fmt.Errorf("scope %s is not support", p.Scope.Level)) + return + } + id, err := retentionController.CreateRetention(p) + if err != nil { + r.SendInternalServerError(err) + return + } + if err := r.pm.GetMetadataManager().Add(p.Scope.Reference, + map[string]string{"retention_id": strconv.FormatInt(id, 10)}); err != nil { + r.SendInternalServerError(err) + } + r.Redirect(http.StatusCreated, strconv.FormatInt(id, 10)) +} + +// UpdateRetention Update Retention +func (r *RetentionAPI) UpdateRetention() { + id, err := r.GetIDFromURL() + if err != nil { + r.SendBadRequestError(err) + return + } + p := &policy.Metadata{} + isValid, err := r.DecodeJSONReqAndValidate(p) + if !isValid { + r.SendBadRequestError(err) + return + } + p.ID = id + if err = r.checkRuleConflict(p); 
err != nil { + r.SendConflictError(err) + return + } + if !r.requireAccess(p, rbac.ActionUpdate) { + return + } + if err = retentionController.UpdateRetention(p); err != nil { + r.SendInternalServerError(err) + return + } +} + +func (r *RetentionAPI) checkRuleConflict(p *policy.Metadata) error { + temp := make(map[string]int) + for n, rule := range p.Rules { + tid := rule.ID + rule.ID = 0 + bs, _ := json.Marshal(rule) + if old, exists := temp[string(bs)]; exists { + return fmt.Errorf("rule %d is conflict with rule %d", n, old) + } + temp[string(bs)] = n + rule.ID = tid + } + return nil +} + +// TriggerRetentionExec Trigger Retention Execution +func (r *RetentionAPI) TriggerRetentionExec() { + id, err := r.GetIDFromURL() + if err != nil { + r.SendBadRequestError(err) + return + } + d := &struct { + DryRun bool `json:"dry_run"` + }{ + DryRun: false, + } + isValid, err := r.DecodeJSONReqAndValidate(d) + if !isValid { + r.SendBadRequestError(err) + return + } + p, err := retentionController.GetRetention(id) + if err != nil { + r.SendBadRequestError(err) + return + } + if !r.requireAccess(p, rbac.ActionUpdate) { + return + } + eid, err := retentionController.TriggerRetentionExec(id, retention.ExecutionTriggerManual, d.DryRun) + if err != nil { + r.SendInternalServerError(err) + return + } + r.Redirect(http.StatusCreated, strconv.FormatInt(eid, 10)) +} + +// OperateRetentionExec Operate Retention Execution +func (r *RetentionAPI) OperateRetentionExec() { + id, err := r.GetIDFromURL() + if err != nil { + r.SendBadRequestError(err) + return + } + eid, err := r.GetInt64FromPath(":eid") + if err != nil { + r.SendBadRequestError(err) + return + } + a := &struct { + Action string `json:"action" valid:"Required"` + }{} + isValid, err := r.DecodeJSONReqAndValidate(a) + if !isValid { + r.SendBadRequestError(err) + return + } + p, err := retentionController.GetRetention(id) + if err != nil { + r.SendBadRequestError(err) + return + } + if !r.requireAccess(p, rbac.ActionUpdate) { + 
return + } + if err = retentionController.OperateRetentionExec(eid, a.Action); err != nil { + r.SendInternalServerError(err) + return + } +} + +// ListRetentionExecs List Retention Execution +func (r *RetentionAPI) ListRetentionExecs() { + id, err := r.GetIDFromURL() + if err != nil { + r.SendBadRequestError(err) + return + } + page, size, err := r.GetPaginationParams() + if err != nil { + r.SendInternalServerError(err) + return + } + query := &q.Query{ + PageNumber: page, + PageSize: size, + } + p, err := retentionController.GetRetention(id) + if err != nil { + r.SendBadRequestError(err) + return + } + if !r.requireAccess(p, rbac.ActionList) { + return + } + execs, err := retentionController.ListRetentionExecs(id, query) + if err != nil { + r.SendInternalServerError(err) + return + } + total, err := retentionController.GetTotalOfRetentionExecs(id) + if err != nil { + r.SendInternalServerError(err) + return + } + r.SetPaginationHeader(total, query.PageNumber, query.PageSize) + r.WriteJSONData(execs) +} + +// ListRetentionExecTasks List Retention Execution Tasks +func (r *RetentionAPI) ListRetentionExecTasks() { + id, err := r.GetIDFromURL() + if err != nil { + r.SendBadRequestError(err) + return + } + eid, err := r.GetInt64FromPath(":eid") + if err != nil { + r.SendBadRequestError(err) + return + } + page, size, err := r.GetPaginationParams() + if err != nil { + r.SendInternalServerError(err) + return + } + query := &q.Query{ + PageNumber: page, + PageSize: size, + } + p, err := retentionController.GetRetention(id) + if err != nil { + r.SendBadRequestError(err) + return + } + if !r.requireAccess(p, rbac.ActionList) { + return + } + his, err := retentionController.ListRetentionExecTasks(eid, query) + if err != nil { + r.SendInternalServerError(err) + return + } + total, err := retentionController.GetTotalOfRetentionExecTasks(eid) + if err != nil { + r.SendInternalServerError(err) + return + } + r.SetPaginationHeader(total, query.PageNumber, query.PageSize) + 
r.WriteJSONData(his) +} + +// GetRetentionExecTaskLog Get Retention Execution Task log +func (r *RetentionAPI) GetRetentionExecTaskLog() { + tid, err := r.GetInt64FromPath(":tid") + if err != nil { + r.SendBadRequestError(err) + return + } + log, err := retentionController.GetRetentionExecTaskLog(tid) + if err != nil { + r.SendInternalServerError(err) + return + } + w := r.Ctx.ResponseWriter + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + w.Write(log) +} + +func (r *RetentionAPI) requireAccess(p *policy.Metadata, action rbac.Action, subresources ...rbac.Resource) bool { + var hasPermission bool + + switch p.Scope.Level { + case "project": + if len(subresources) == 0 { + subresources = append(subresources, rbac.ResourceTagRetention) + } + resource := rbac.NewProjectNamespace(p.Scope.Reference).Resource(subresources...) + hasPermission = r.SecurityCtx.Can(action, resource) + default: + hasPermission = r.SecurityCtx.IsSysAdmin() + } + + if !hasPermission { + if !r.SecurityCtx.IsAuthenticated() { + r.SendUnAuthorizedError(errors.New("UnAuthorized")) + } else { + r.SendForbiddenError(errors.New(r.SecurityCtx.GetUsername())) + } + return false + } + + return true +} diff --git a/src/core/api/retention_test.go b/src/core/api/retention_test.go new file mode 100644 index 000000000..0fe47f628 --- /dev/null +++ b/src/core/api/retention_test.go @@ -0,0 +1,456 @@ +package api + +import ( + "encoding/json" + "fmt" + "github.com/goharbor/harbor/src/pkg/retention/dao" + "github.com/goharbor/harbor/src/pkg/retention/dao/models" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/stretchr/testify/require" + "net/http" + "testing" + "time" +) + +func TestGetMetadatas(t *testing.T) { + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodGet, + url: "/api/retentions/metadatas", + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } 
+ + runCodeCheckingCases(t, cases...) +} + +func TestCreatePolicy(t *testing.T) { + p1 := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/retentions", + }, + code: http.StatusUnauthorized, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/retentions", + bodyJSON: p1, + credential: sysAdmin, + }, + code: http.StatusOK, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/retentions", + bodyJSON: &policy.Metadata{ + Algorithm: "NODEF", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{}, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + }, + credential: sysAdmin, + }, + code: http.StatusBadRequest, 
+ }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/retentions", + bodyJSON: &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + { + ID: 2, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + }, + credential: sysAdmin, + }, + code: http.StatusConflict, + }, + } + + runCodeCheckingCases(t, cases...) 
+} + +func TestPolicy(t *testing.T) { + p := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + p1 := &models.RetentionPolicy{ + ScopeLevel: p.Scope.Level, + TriggerKind: p.Trigger.Kind, + CreateTime: time.Now(), + UpdateTime: time.Now(), + } + data, _ := json.Marshal(p) + p1.Data = string(data) + + id, err := dao.CreatePolicy(p1) + require.Nil(t, err) + require.True(t, id > 0) + + cases := []*codeCheckingCase{ + { + request: &testingRequest{ + method: http.MethodGet, + url: fmt.Sprintf("/api/retentions/%d", id), + credential: sysAdmin, + }, + code: http.StatusOK, + }, + { + request: &testingRequest{ + method: http.MethodPut, + url: fmt.Sprintf("/api/retentions/%d", id), + bodyJSON: &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "b.+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": 
"* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + }, + credential: sysAdmin, + }, + code: http.StatusOK, + }, + { + request: &testingRequest{ + method: http.MethodPut, + url: fmt.Sprintf("/api/retentions/%d", id), + bodyJSON: &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "b.+", + }, + }, + }, + }, + { + ID: 2, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "b.+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + }, + credential: sysAdmin, + }, + code: http.StatusConflict, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: fmt.Sprintf("/api/retentions/%d/executions", id), + bodyJSON: &struct { + DryRun bool `json:"dry_run"` + }{ + DryRun: false, + }, + credential: sysAdmin, + }, + code: http.StatusOK, + }, + { + request: &testingRequest{ + method: http.MethodGet, + url: fmt.Sprintf("/api/retentions/%d/executions", id), + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + + runCodeCheckingCases(t, cases...) 
+} diff --git a/src/core/api/robot.go b/src/core/api/robot.go index cc1b880e3..be49983a4 100644 --- a/src/core/api/robot.go +++ b/src/core/api/robot.go @@ -158,7 +158,6 @@ func (r *RobotAPI) Post() { } robotRep := models.RobotRep{ - ID: id, Name: robot.Name, Token: rawTk, } diff --git a/src/core/api/scan_all_test.go b/src/core/api/scan_all_test.go index 4378fb900..347b39182 100644 --- a/src/core/api/scan_all_test.go +++ b/src/core/api/scan_all_test.go @@ -3,7 +3,7 @@ package api import ( "testing" - "github.com/goharbor/harbor/tests/apitests/apilib" + "github.com/goharbor/harbor/src/testing/apitests/apilib" "github.com/stretchr/testify/assert" ) diff --git a/src/core/api/statistic_test.go b/src/core/api/statistic_test.go index 4ef968575..5c9be4229 100644 --- a/src/core/api/statistic_test.go +++ b/src/core/api/statistic_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - // "github.com/goharbor/harbor/tests/apitests/apilib" + // "github.com/goharbor/harbor/src/testing/apitests/apilib" ) func TestStatisticGet(t *testing.T) { diff --git a/src/core/api/sys_cve_whitelist.go b/src/core/api/sys_cve_whitelist.go new file mode 100644 index 000000000..921abb83f --- /dev/null +++ b/src/core/api/sys_cve_whitelist.go @@ -0,0 +1,81 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "errors" + "fmt" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/scan/whitelist" + "net/http" +) + +// SysCVEWhitelistAPI Handles the requests to manage system level CVE whitelist +type SysCVEWhitelistAPI struct { + BaseController + manager whitelist.Manager +} + +// Prepare validates the request initially +func (sca *SysCVEWhitelistAPI) Prepare() { + sca.BaseController.Prepare() + if !sca.SecurityCtx.IsAuthenticated() { + sca.SendUnAuthorizedError(errors.New("Unauthorized")) + return + } + if !sca.SecurityCtx.IsSysAdmin() && sca.Ctx.Request.Method != http.MethodGet { + msg := fmt.Sprintf("only system admin has permission issue %s request to this API", sca.Ctx.Request.Method) + log.Errorf(msg) + sca.SendForbiddenError(errors.New(msg)) + return + } + sca.manager = whitelist.NewDefaultManager() +} + +// Get handles the GET request to retrieve the system level CVE whitelist +func (sca *SysCVEWhitelistAPI) Get() { + l, err := sca.manager.GetSys() + if err != nil { + sca.SendInternalServerError(err) + return + } + sca.WriteJSONData(l) +} + +// Put handles the PUT request to update the system level CVE whitelist +func (sca *SysCVEWhitelistAPI) Put() { + var l models.CVEWhitelist + if err := sca.DecodeJSONReq(&l); err != nil { + log.Errorf("Failed to decode JSON array from request") + sca.SendBadRequestError(err) + return + } + if l.ProjectID != 0 { + msg := fmt.Sprintf("Non-zero project ID for system CVE whitelist: %d.", l.ProjectID) + log.Error(msg) + sca.SendBadRequestError(errors.New(msg)) + return + } + if err := sca.manager.SetSys(l); err != nil { + if whitelist.IsInvalidErr(err) { + log.Errorf("Invalid CVE whitelist: %v", err) + sca.SendBadRequestError(err) + return + } + sca.SendInternalServerError(err) + return + } +} diff --git a/src/core/api/sys_cve_whitelist_test.go b/src/core/api/sys_cve_whitelist_test.go new file mode 100644 index 
000000000..d484b79f2 --- /dev/null +++ b/src/core/api/sys_cve_whitelist_test.go @@ -0,0 +1,126 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package api + +import ( + "github.com/goharbor/harbor/src/common/models" + "net/http" + "testing" +) + +func TestSysCVEWhitelistAPIGet(t *testing.T) { + url := "/api/system/CVEWhitelist" + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodGet, + url: url, + }, + code: http.StatusUnauthorized, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodGet, + url: url, + credential: nonSysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) 
+} + +func TestSysCVEWhitelistAPIPut(t *testing.T) { + url := "/api/system/CVEWhitelist" + s := int64(1573254000) + cases := []*codeCheckingCase{ + // 401 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + }, + code: http.StatusUnauthorized, + }, + // 403 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + credential: nonSysAdmin, + }, + code: http.StatusForbidden, + }, + // 400 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + bodyJSON: []string{"CVE-1234-1234"}, + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + // 400 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + bodyJSON: models.CVEWhitelist{ + ExpiresAt: &s, + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2019-12310"}, + }, + ProjectID: 2, + }, + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + // 400 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + bodyJSON: models.CVEWhitelist{ + ExpiresAt: &s, + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2019-12310"}, + {CVEID: "CVE-2019-12310"}, + }, + }, + credential: sysAdmin, + }, + code: http.StatusBadRequest, + }, + // 200 + { + request: &testingRequest{ + method: http.MethodPut, + url: url, + bodyJSON: models.CVEWhitelist{ + ExpiresAt: &s, + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2019-12310"}, + }, + }, + credential: sysAdmin, + }, + code: http.StatusOK, + }, + } + runCodeCheckingCases(t, cases...) 
+} diff --git a/src/core/api/systeminfo.go b/src/core/api/systeminfo.go index 140a688df..a0929d545 100644 --- a/src/core/api/systeminfo.go +++ b/src/core/api/systeminfo.go @@ -16,13 +16,13 @@ package api import ( "errors" + "fmt" "io/ioutil" "net/http" "os" "strings" "sync" - "fmt" "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" clairdao "github.com/goharbor/harbor/src/common/dao/clair" @@ -106,6 +106,7 @@ type GeneralInfo struct { RegistryStorageProviderName string `json:"registry_storage_provider_name"` ReadOnly bool `json:"read_only"` WithChartMuseum bool `json:"with_chartmuseum"` + NotificationEnable bool `json:"notification_enable"` } // GetVolumeInfo gets specific volume storage info. @@ -188,6 +189,7 @@ func (sia *SystemInfoAPI) GetGeneralInfo() { RegistryStorageProviderName: utils.SafeCastString(cfg[common.RegistryStorageProviderName]), ReadOnly: config.ReadOnly(), WithChartMuseum: config.WithChartMuseum(), + NotificationEnable: utils.SafeCastBool(cfg[common.NotificationEnable]), } if info.WithClair { info.ClairVulnStatus = getClairVulnStatus() diff --git a/src/core/api/user_test.go b/src/core/api/user_test.go index 75d324322..0c2bbc519 100644 --- a/src/core/api/user_test.go +++ b/src/core/api/user_test.go @@ -23,7 +23,7 @@ import ( "github.com/goharbor/harbor/src/common/api" "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/tests/apitests/apilib" + "github.com/goharbor/harbor/src/testing/apitests/apilib" "github.com/stretchr/testify/assert" "github.com/astaxie/beego" diff --git a/src/core/api/usergroup.go b/src/core/api/usergroup.go index 317ab4362..3bfd2d34e 100644 --- a/src/core/api/usergroup.go +++ b/src/core/api/usergroup.go @@ -27,12 +27,14 @@ import ( "github.com/goharbor/harbor/src/common/utils/ldap" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/auth" + "github.com/goharbor/harbor/src/core/config" ) // UserGroupAPI ... 
type UserGroupAPI struct { BaseController - id int + id int + groupType int } const ( @@ -61,6 +63,15 @@ func (uga *UserGroupAPI) Prepare() { uga.SendForbiddenError(errors.New(uga.SecurityCtx.GetUsername())) return } + authMode, err := config.AuthMode() + if err != nil { + uga.SendInternalServerError(errors.New("failed to get authentication mode")) + } + if authMode == common.LDAPAuth { + uga.groupType = common.LDAPGroupType + } else if authMode == common.HTTPAuth { + uga.groupType = common.HTTPGroupType + } } // Get ... @@ -69,7 +80,7 @@ func (uga *UserGroupAPI) Get() { uga.Data["json"] = make([]models.UserGroup, 0) if ID == 0 { // user group id not set, return all user group - query := models.UserGroup{GroupType: common.LdapGroupType} // Current query LDAP group only + query := models.UserGroup{GroupType: uga.groupType} userGroupList, err := group.QueryUserGroup(query) if err != nil { uga.SendInternalServerError(fmt.Errorf("failed to query database for user group list, error: %v", err)) @@ -103,41 +114,50 @@ func (uga *UserGroupAPI) Post() { } userGroup.ID = 0 - userGroup.GroupType = common.LdapGroupType + if userGroup.GroupType == 0 { + userGroup.GroupType = uga.groupType + } userGroup.LdapGroupDN = strings.TrimSpace(userGroup.LdapGroupDN) userGroup.GroupName = strings.TrimSpace(userGroup.GroupName) if len(userGroup.GroupName) == 0 { uga.SendBadRequestError(errors.New(userNameEmptyMsg)) return } - query := models.UserGroup{GroupType: userGroup.GroupType, LdapGroupDN: userGroup.LdapGroupDN} - result, err := group.QueryUserGroup(query) - if err != nil { - uga.SendInternalServerError(fmt.Errorf("error occurred in add user group, error: %v", err)) - return - } - if len(result) > 0 { - uga.SendConflictError(errors.New("error occurred in add user group, duplicate user group exist")) - return - } - // User can not add ldap group when the ldap server is offline - ldapGroup, err := auth.SearchGroup(userGroup.LdapGroupDN) - if err == ldap.ErrNotFound || ldapGroup == nil { 
- uga.SendBadRequestError(fmt.Errorf("LDAP Group DN is not found: DN:%v", userGroup.LdapGroupDN)) - return - } - if err == ldap.ErrDNSyntax { - uga.SendBadRequestError(fmt.Errorf("invalid DN syntax. DN: %v", userGroup.LdapGroupDN)) - return - } - if err != nil { - uga.SendInternalServerError(fmt.Errorf("Error occurred in search user group. error: %v", err)) - return + + if userGroup.GroupType == common.LDAPGroupType { + query := models.UserGroup{GroupType: userGroup.GroupType, LdapGroupDN: userGroup.LdapGroupDN} + result, err := group.QueryUserGroup(query) + if err != nil { + uga.SendInternalServerError(fmt.Errorf("error occurred in add user group, error: %v", err)) + return + } + if len(result) > 0 { + uga.SendConflictError(errors.New("error occurred in add user group, duplicate user group exist")) + return + } + // User can not add ldap group when the ldap server is offline + ldapGroup, err := auth.SearchGroup(userGroup.LdapGroupDN) + if err == ldap.ErrNotFound || ldapGroup == nil { + uga.SendBadRequestError(fmt.Errorf("LDAP Group DN is not found: DN:%v", userGroup.LdapGroupDN)) + return + } + if err == ldap.ErrDNSyntax { + uga.SendBadRequestError(fmt.Errorf("invalid DN syntax. DN: %v", userGroup.LdapGroupDN)) + return + } + if err != nil { + uga.SendInternalServerError(fmt.Errorf("error occurred in search user group. 
error: %v", err)) + return + } } groupID, err := group.AddUserGroup(userGroup) if err != nil { - uga.SendInternalServerError(fmt.Errorf("Error occurred in add user group, error: %v", err)) + if err == group.ErrGroupNameDup { + uga.SendConflictError(fmt.Errorf("duplicated user group name %s", userGroup.GroupName)) + return + } + uga.SendInternalServerError(fmt.Errorf("error occurred in add user group, error: %v", err)) return } uga.Redirect(http.StatusCreated, strconv.FormatInt(int64(groupID), 10)) @@ -150,13 +170,17 @@ func (uga *UserGroupAPI) Put() { uga.SendBadRequestError(err) return } + if userGroup.GroupType == common.HTTPGroupType { + uga.SendBadRequestError(errors.New("HTTP group is not allowed to update")) + return + } ID := uga.id userGroup.GroupName = strings.TrimSpace(userGroup.GroupName) if len(userGroup.GroupName) == 0 { uga.SendBadRequestError(errors.New(userNameEmptyMsg)) return } - userGroup.GroupType = common.LdapGroupType + userGroup.GroupType = common.LDAPGroupType log.Debugf("Updated user group %v", userGroup) err := group.UpdateUserGroupName(ID, userGroup.GroupName) if err != nil { diff --git a/src/core/api/usergroup_test.go b/src/core/api/usergroup_test.go index ebeeefb4d..dad91080e 100644 --- a/src/core/api/usergroup_test.go +++ b/src/core/api/usergroup_test.go @@ -35,7 +35,7 @@ func TestUserGroupAPI_GetAndDelete(t *testing.T) { groupID, err := group.AddUserGroup(models.UserGroup{ GroupName: "harbor_users", LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com", - GroupType: common.LdapGroupType, + GroupType: common.LDAPGroupType, }) if err != nil { @@ -88,7 +88,7 @@ func TestUserGroupAPI_Post(t *testing.T) { groupID, err := group.AddUserGroup(models.UserGroup{ GroupName: "harbor_group", LdapGroupDN: "cn=harbor_group,ou=groups,dc=example,dc=com", - GroupType: common.LdapGroupType, + GroupType: common.LDAPGroupType, }) if err != nil { t.Errorf("Error occurred when AddUserGroup: %v", err) @@ -104,7 +104,32 @@ func TestUserGroupAPI_Post(t 
*testing.T) { bodyJSON: &models.UserGroup{ GroupName: "harbor_group", LdapGroupDN: "cn=harbor_group,ou=groups,dc=example,dc=com", - GroupType: common.LdapGroupType, + GroupType: common.LDAPGroupType, + }, + credential: admin, + }, + code: http.StatusConflict, + }, + // 201 + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/usergroups", + bodyJSON: &models.UserGroup{ + GroupName: "vsphere.local\\guest", + GroupType: common.HTTPGroupType, + }, + credential: admin, + }, + code: http.StatusCreated, + }, + { + request: &testingRequest{ + method: http.MethodPost, + url: "/api/usergroups", + bodyJSON: &models.UserGroup{ + GroupName: "vsphere.local\\guest", + GroupType: common.HTTPGroupType, }, credential: admin, }, @@ -118,7 +143,7 @@ func TestUserGroupAPI_Put(t *testing.T) { groupID, err := group.AddUserGroup(models.UserGroup{ GroupName: "harbor_group", LdapGroupDN: "cn=harbor_groups,ou=groups,dc=example,dc=com", - GroupType: common.LdapGroupType, + GroupType: common.LDAPGroupType, }) defer group.DeleteUserGroup(groupID) @@ -149,6 +174,19 @@ func TestUserGroupAPI_Put(t *testing.T) { }, code: http.StatusOK, }, + // 400 + { + request: &testingRequest{ + method: http.MethodPut, + url: fmt.Sprintf("/api/usergroups/%d", groupID), + bodyJSON: &models.UserGroup{ + GroupName: "my_group", + GroupType: common.HTTPGroupType, + }, + credential: admin, + }, + code: http.StatusBadRequest, + }, } runCodeCheckingCases(t, cases...) 
} diff --git a/src/core/api/utils.go b/src/core/api/utils.go index ae7f30f7a..4fd20d383 100644 --- a/src/core/api/utils.go +++ b/src/core/api/utils.go @@ -24,7 +24,6 @@ import ( commonhttp "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils" - "github.com/goharbor/harbor/src/common/utils/clair" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/common/utils/registry" "github.com/goharbor/harbor/src/common/utils/registry/auth" @@ -39,7 +38,7 @@ func SyncRegistry(pm promgr.ProjectManager) error { log.Infof("Start syncing repositories from registry to DB... ") - reposInRegistry, err := catalog() + reposInRegistry, err := Catalog() if err != nil { log.Error(err) return err @@ -106,7 +105,8 @@ func SyncRegistry(pm promgr.ProjectManager) error { return nil } -func catalog() ([]string, error) { +// Catalog ... +func Catalog() ([]string, error) { repositories := []string{} rc, err := initRegistryClient() @@ -279,35 +279,3 @@ func repositoryExist(name string, client *registry.Repository) (bool, error) { } return len(tags) != 0, nil } - -// transformVulnerabilities transforms the returned value of Clair API to a list of VulnerabilityItem -func transformVulnerabilities(layerWithVuln *models.ClairLayerEnvelope) []*models.VulnerabilityItem { - res := []*models.VulnerabilityItem{} - l := layerWithVuln.Layer - if l == nil { - return res - } - features := l.Features - if features == nil { - return res - } - for _, f := range features { - vulnerabilities := f.Vulnerabilities - if vulnerabilities == nil { - continue - } - for _, v := range vulnerabilities { - vItem := &models.VulnerabilityItem{ - ID: v.Name, - Pkg: f.Name, - Version: f.Version, - Severity: clair.ParseClairSev(v.Severity), - Fixed: v.FixedBy, - Link: v.Link, - Description: v.Description, - } - res = append(res, vItem) - } - } - return res -} diff --git a/src/core/auth/authproxy/auth.go 
b/src/core/auth/authproxy/auth.go index e388cbad6..1efe42f3e 100644 --- a/src/core/auth/authproxy/auth.go +++ b/src/core/auth/authproxy/auth.go @@ -16,18 +16,25 @@ package authproxy import ( "crypto/tls" + "encoding/json" + "errors" "fmt" + "io/ioutil" + "net/http" + "strings" + "sync" + "time" + + "github.com/goharbor/harbor/src/common/dao/group" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/auth" "github.com/goharbor/harbor/src/core/config" - "io/ioutil" - "net/http" - "strings" - "sync" - "time" + "github.com/goharbor/harbor/src/pkg/authproxy" + k8s_api_v1beta1 "k8s.io/api/authentication/v1beta1" ) const refreshDuration = 2 * time.Second @@ -45,11 +52,16 @@ var insecureTransport = &http.Transport{ type Auth struct { auth.DefaultAuthenticateHelper sync.Mutex - Endpoint string - SkipCertVerify bool - AlwaysOnboard bool - settingTimeStamp time.Time - client *http.Client + Endpoint string + TokenReviewEndpoint string + SkipCertVerify bool + SkipSearch bool + settingTimeStamp time.Time + client *http.Client +} + +type session struct { + SessionID string `json:"session_id,omitempty"` } // Authenticate issues http POST request to Endpoint if it returns 200 the authentication is considered success. 
@@ -72,7 +84,39 @@ func (a *Auth) Authenticate(m models.AuthModel) (*models.User, error) { } defer resp.Body.Close() if resp.StatusCode == http.StatusOK { - return &models.User{Username: m.Principal}, nil + user := &models.User{Username: m.Principal} + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Warningf("Failed to read response body, error: %v", err) + return nil, auth.ErrAuth{} + } + s := session{} + err = json.Unmarshal(data, &s) + if err != nil { + log.Errorf("failed to read session %v", err) + } + + reviewResponse, err := a.tokenReview(s.SessionID) + if err != nil { + return nil, err + } + if reviewResponse == nil { + return nil, auth.ErrAuth{} + } + + // Attach user group ID information + ugList := reviewResponse.Status.User.Groups + log.Debugf("user groups %+v", ugList) + if len(ugList) > 0 { + groupIDList, err := group.GetGroupIDByGroupName(ugList, common.HTTPGroupType) + if err != nil { + return nil, err + } + log.Debugf("current user's group ID list is %+v", groupIDList) + user.GroupIDs = groupIDList + } + return user, nil + } else if resp.StatusCode == http.StatusUnauthorized { return nil, auth.ErrAuth{} } else { @@ -81,10 +125,19 @@ func (a *Auth) Authenticate(m models.AuthModel) (*models.User, error) { log.Warningf("Failed to read response body, error: %v", err) } return nil, fmt.Errorf("failed to authenticate, status code: %d, text: %s", resp.StatusCode, string(data)) + } } +func (a *Auth) tokenReview(sessionID string) (*k8s_api_v1beta1.TokenReview, error) { + httpAuthProxySetting, err := config.HTTPAuthProxySetting() + if err != nil { + return nil, err + } + return authproxy.TokenReview(sessionID, httpAuthProxySetting) +} + // OnBoardUser delegates to dao pkg to insert/update data in DB. func (a *Auth) OnBoardUser(u *models.User) error { return dao.OnBoardUser(u) @@ -102,14 +155,14 @@ func (a *Auth) PostAuthenticate(u *models.User) error { } // SearchUser returns nil as authproxy does not have such capability. 
-// When AlwaysOnboard is set it always return the default model. +// When SkipSearch is set it always return the default model. func (a *Auth) SearchUser(username string) (*models.User, error) { err := a.ensure() if err != nil { log.Warningf("Failed to refresh configuration for HTTP Auth Proxy Authenticator, error: %v, the default settings will be used", err) } var u *models.User - if a.AlwaysOnboard { + if a.SkipSearch { u = &models.User{Username: username} if err := a.fillInModel(u); err != nil { return nil, err @@ -118,6 +171,37 @@ func (a *Auth) SearchUser(username string) (*models.User, error) { return u, nil } +// SearchGroup search group exist in the authentication provider, for HTTP auth, if SkipSearch is true, it assume this group exist in authentication provider. +func (a *Auth) SearchGroup(groupKey string) (*models.UserGroup, error) { + err := a.ensure() + if err != nil { + log.Warningf("Failed to refresh configuration for HTTP Auth Proxy Authenticator, error: %v, the default settings will be used", err) + } + var ug *models.UserGroup + if a.SkipSearch { + ug = &models.UserGroup{ + GroupName: groupKey, + GroupType: common.HTTPGroupType, + } + return ug, nil + } + return nil, nil +} + +// OnBoardGroup create user group entity in Harbor DB, altGroupName is not used. 
+func (a *Auth) OnBoardGroup(u *models.UserGroup, altGroupName string) error { + // if group name provided, on board the user group + if len(u.GroupName) == 0 { + return errors.New("Should provide a group name") + } + u.GroupType = common.HTTPGroupType + err := group.OnBoardUserGroup(u) + if err != nil { + return err + } + return nil +} + func (a *Auth) fillInModel(u *models.User) error { if strings.TrimSpace(u.Username) == "" { return fmt.Errorf("username cannot be empty") @@ -127,8 +211,6 @@ func (a *Auth) fillInModel(u *models.User) error { u.Comment = userEntryComment if strings.Contains(u.Username, "@") { u.Email = u.Username - } else { - u.Email = fmt.Sprintf("%s@placeholder.com", u.Username) } return nil } @@ -145,8 +227,9 @@ func (a *Auth) ensure() error { return err } a.Endpoint = setting.Endpoint + a.TokenReviewEndpoint = setting.TokenReviewEndpoint a.SkipCertVerify = !setting.VerifyCert - a.AlwaysOnboard = setting.AlwaysOnBoard + a.SkipSearch = setting.SkipSearch } if a.SkipCertVerify { a.client.Transport = insecureTransport diff --git a/src/core/auth/authproxy/auth_test.go b/src/core/auth/authproxy/auth_test.go index 0e45b7388..b1fb4ab22 100644 --- a/src/core/auth/authproxy/auth_test.go +++ b/src/core/auth/authproxy/auth_test.go @@ -15,18 +15,20 @@ package authproxy import ( + "net/http/httptest" + "os" + "testing" + "time" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/models" cut "github.com/goharbor/harbor/src/common/utils/test" "github.com/goharbor/harbor/src/core/auth" "github.com/goharbor/harbor/src/core/auth/authproxy/test" "github.com/goharbor/harbor/src/core/config" "github.com/stretchr/testify/assert" - "net/http/httptest" - "os" - "testing" - "time" ) var mockSvr *httptest.Server @@ -41,16 +43,25 @@ func TestMain(m *testing.M) { } mockSvr = test.NewMockServer(map[string]string{"jt": "pp", "Admin@vsphere.local": 
"Admin!23"}) defer mockSvr.Close() + defer dao.ExecuteBatchSQL([]string{"delete from user_group where group_name='OnBoardTest'"}) a = &Auth{ - Endpoint: mockSvr.URL + "/test/login", - SkipCertVerify: true, + Endpoint: mockSvr.URL + "/test/login", + TokenReviewEndpoint: mockSvr.URL + "/test/tokenreview", + SkipCertVerify: true, // So it won't require mocking the cfgManager settingTimeStamp: time.Now(), } + cfgMap := cut.GetUnitTestConfig() conf := map[string]interface{}{ - common.HTTPAuthProxyEndpoint: "dummy", - common.HTTPAuthProxyTokenReviewEndpoint: "dummy", - common.HTTPAuthProxyVerifyCert: "false", + common.HTTPAuthProxyEndpoint: a.Endpoint, + common.HTTPAuthProxyTokenReviewEndpoint: a.TokenReviewEndpoint, + common.HTTPAuthProxyVerifyCert: !a.SkipCertVerify, + common.PostGreSQLSSLMode: cfgMap[common.PostGreSQLSSLMode], + common.PostGreSQLUsername: cfgMap[common.PostGreSQLUsername], + common.PostGreSQLPort: cfgMap[common.PostGreSQLPort], + common.PostGreSQLHOST: cfgMap[common.PostGreSQLHOST], + common.PostGreSQLPassword: cfgMap[common.PostGreSQLPassword], + common.PostGreSQLDatabase: cfgMap[common.PostGreSQLDatabase], } config.InitWithSettings(conf) @@ -64,6 +75,10 @@ func TestMain(m *testing.M) { } func TestAuth_Authenticate(t *testing.T) { + groupIDs, err := group.GetGroupIDByGroupName([]string{"vsphere.local\\users", "vsphere.local\\administrators"}, common.HTTPGroupType) + if err != nil { + t.Fatal("Failed to get groupIDs") + } t.Log("auth endpoint: ", a.Endpoint) type output struct { user models.User @@ -80,6 +95,7 @@ func TestAuth_Authenticate(t *testing.T) { expect: output{ user: models.User{ Username: "jt", + GroupIDs: groupIDs, }, err: nil, }, @@ -92,6 +108,7 @@ func TestAuth_Authenticate(t *testing.T) { expect: output{ user: models.User{ Username: "Admin@vsphere.local", + GroupIDs: groupIDs, // Email: "Admin@placeholder.com", // Password: pwd, // Comment: fmt.Sprintf(cmtTmpl, path.Join(mockSvr.URL, "/test/login")), @@ -137,7 +154,7 @@ func 
TestAuth_PostAuthenticate(t *testing.T) { }, expect: models.User{ Username: "jt", - Email: "jt@placeholder.com", + Email: "", Realname: "jt", Password: pwd, Comment: userEntryComment, @@ -165,3 +182,19 @@ func TestAuth_PostAuthenticate(t *testing.T) { } } + +func TestAuth_OnBoardGroup(t *testing.T) { + input := &models.UserGroup{ + GroupName: "OnBoardTest", + GroupType: common.HTTPGroupType, + } + a.OnBoardGroup(input, "") + + assert.True(t, input.ID > 0, "The OnBoardGroup should have a valid group ID") + + emptyGroup := &models.UserGroup{} + err := a.OnBoardGroup(emptyGroup, "") + if err == nil { + t.Fatal("Empty user group should failed to OnBoard") + } +} diff --git a/src/core/auth/authproxy/test/server.go b/src/core/auth/authproxy/test/server.go index b11ec17aa..6fead0196 100644 --- a/src/core/auth/authproxy/test/server.go +++ b/src/core/auth/authproxy/test/server.go @@ -41,9 +41,20 @@ func (ah *authHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { } } +type reviewTokenHandler struct { +} + +func (rth *reviewTokenHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodPost { + http.Error(rw, "", http.StatusMethodNotAllowed) + } + rw.Write([]byte(`{"apiVersion": "authentication.k8s.io/v1beta1", "kind": "TokenReview", "status": {"authenticated": true, "user": {"username": "administrator@vsphere.local", "groups": ["vsphere.local\\users", "vsphere.local\\administrators", "vsphere.local\\caadmins", "vsphere.local\\systemconfiguration.bashshelladministrators", "vsphere.local\\systemconfiguration.administrators", "vsphere.local\\licenseservice.administrators", "vsphere.local\\everyone"], "extra": {"method": ["basic"]}}}}`)) +} + // NewMockServer creates the mock server for testing func NewMockServer(creds map[string]string) *httptest.Server { mux := http.NewServeMux() mux.Handle("/test/login", &authHandler{m: creds}) + mux.Handle("/test/tokenreview", &reviewTokenHandler{}) return httptest.NewTLSServer(mux) } diff 
--git a/src/core/auth/ldap/ldap.go b/src/core/auth/ldap/ldap.go index 4d8b63fdf..c5fd86d29 100644 --- a/src/core/auth/ldap/ldap.go +++ b/src/core/auth/ldap/ldap.go @@ -20,11 +20,11 @@ import ( "strings" "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/utils" goldap "gopkg.in/ldap.v2" - "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/models" ldapUtils "github.com/goharbor/harbor/src/common/utils/ldap" "github.com/goharbor/harbor/src/common/utils/log" @@ -79,7 +79,7 @@ func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) { u.Username = ldapUsers[0].Username u.Email = strings.TrimSpace(ldapUsers[0].Email) u.Realname = ldapUsers[0].Realname - userGroups := make([]*models.UserGroup, 0) + ugIDs := []int{} dn := ldapUsers[0].DN if err = ldapSession.Bind(dn, m.Password); err != nil { @@ -95,6 +95,7 @@ func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) { for _, groupDN := range ldapUsers[0].GroupDNList { groupDN = utils.TrimLower(groupDN) + // Attach LDAP group admin if len(groupAdminDN) > 0 && groupAdminDN == groupDN { u.HasAdminRole = true } @@ -103,16 +104,16 @@ func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) { GroupType: 1, LdapGroupDN: groupDN, } - userGroupList, err := group.QueryUserGroup(userGroupQuery) + userGroups, err := group.QueryUserGroup(userGroupQuery) if err != nil { continue } - if len(userGroupList) == 0 { + if len(userGroups) == 0 { continue } - userGroups = append(userGroups, userGroupList[0]) + ugIDs = append(ugIDs, userGroups[0].ID) } - u.GroupList = userGroups + u.GroupIDs = ugIDs return &u, nil } @@ -123,8 +124,6 @@ func (l *Auth) OnBoardUser(u *models.User) error { if u.Email == "" { if strings.Contains(u.Username, "@") { u.Email = u.Username - } else { - u.Email = u.Username + 
"@placeholder.com" } } u.Password = "12345678AbC" // Password is not kept in local db @@ -204,7 +203,7 @@ func (l *Auth) OnBoardGroup(u *models.UserGroup, altGroupName string) error { if len(altGroupName) > 0 { u.GroupName = altGroupName } - u.GroupType = common.LdapGroupType + u.GroupType = common.LDAPGroupType // Check duplicate LDAP DN in usergroup, if usergroup exist, return error userGroupList, err := group.QueryUserGroup(models.UserGroup{LdapGroupDN: u.LdapGroupDN}) if err != nil { @@ -213,7 +212,7 @@ func (l *Auth) OnBoardGroup(u *models.UserGroup, altGroupName string) error { if len(userGroupList) > 0 { return auth.ErrDuplicateLDAPGroup } - return group.OnBoardUserGroup(u, "LdapGroupDN", "GroupType") + return group.OnBoardUserGroup(u) } // PostAuthenticate -- If user exist in harbor DB, sync email address, if not exist, call OnBoardUser diff --git a/src/core/auth/ldap/ldap_test.go b/src/core/auth/ldap/ldap_test.go index 5eb852fbe..9002bd8bf 100644 --- a/src/core/auth/ldap/ldap_test.go +++ b/src/core/auth/ldap/ldap_test.go @@ -55,7 +55,7 @@ var ldapTestConfig = map[string]interface{}{ common.LDAPGroupBaseDN: "dc=example,dc=com", common.LDAPGroupAttributeName: "cn", common.LDAPGroupSearchScope: 2, - common.LdapGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com", + common.LDAPGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com", } func TestMain(m *testing.M) { @@ -92,8 +92,8 @@ func TestMain(m *testing.M) { "delete from user_group", "delete from project_member", } - dao.PrepareTestData(clearSqls, initSqls) - + dao.ExecuteBatchSQL(initSqls) + defer dao.ExecuteBatchSQL(clearSqls) retCode := m.Run() os.Exit(retCode) } @@ -224,7 +224,7 @@ func TestOnBoardUser_02(t *testing.T) { t.Errorf("Failed to onboard user") } - assert.Equal(t, "sample02@placeholder.com", user.Email) + assert.Equal(t, "", user.Email) dao.CleanUser(int64(user.UserID)) } @@ -405,6 +405,7 @@ func TestAddProjectMemberWithLdapGroup(t *testing.T) { ProjectID: currentProject.ProjectID, 
MemberGroup: models.UserGroup{ LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com", + GroupType: 1, }, Role: models.PROJECTADMIN, } diff --git a/src/core/auth/uaa/uaa.go b/src/core/auth/uaa/uaa.go index b4889302c..8ca250fc1 100644 --- a/src/core/auth/uaa/uaa.go +++ b/src/core/auth/uaa/uaa.go @@ -77,9 +77,8 @@ func fillEmailRealName(user *models.User) { if len(user.Realname) == 0 { user.Realname = user.Username } - if len(user.Email) == 0 { - // TODO: handle the case when user.Username itself is an email address. - user.Email = user.Username + "@uaa.placeholder" + if len(user.Email) == 0 && strings.Contains(user.Username, "@") { + user.Email = user.Username } } diff --git a/src/core/auth/uaa/uaa_test.go b/src/core/auth/uaa/uaa_test.go index 7b0ff9ea9..a62bd7d7d 100644 --- a/src/core/auth/uaa/uaa_test.go +++ b/src/core/auth/uaa/uaa_test.go @@ -110,7 +110,7 @@ func TestOnBoardUser(t *testing.T) { user, _ := dao.GetUser(models.User{Username: "test"}) assert.Equal("test", user.Realname) assert.Equal("test", user.Username) - assert.Equal("test@uaa.placeholder", user.Email) + assert.Equal("", user.Email) err3 := dao.ClearTable(models.UserTable) assert.Nil(err3) } @@ -128,7 +128,7 @@ func TestPostAuthenticate(t *testing.T) { } assert.Nil(err) user, _ := dao.GetUser(models.User{Username: "test"}) - assert.Equal("test@uaa.placeholder", user.Email) + assert.Equal("", user.Email) um2.Email = "newEmail@new.com" um2.Realname = "newName" err2 := auth.PostAuthenticate(um2) @@ -145,7 +145,7 @@ func TestPostAuthenticate(t *testing.T) { assert.Nil(err3) user3, _ := dao.GetUser(models.User{Username: "test"}) assert.Equal(user3.UserID, um3.UserID) - assert.Equal("test@uaa.placeholder", user3.Email) + assert.Equal("", user3.Email) assert.Equal("test", user3.Realname) err4 := dao.ClearTable(models.UserTable) assert.Nil(err4) diff --git a/src/core/config/config.go b/src/core/config/config.go old mode 100644 new mode 100755 index 31ed8cadc..b3808745d --- a/src/core/config/config.go 
+++ b/src/core/config/config.go @@ -224,7 +224,7 @@ func LDAPGroupConf() (*models.LdapGroupConf, error) { LdapGroupFilter: cfgMgr.Get(common.LDAPGroupSearchFilter).GetString(), LdapGroupNameAttribute: cfgMgr.Get(common.LDAPGroupAttributeName).GetString(), LdapGroupSearchScope: cfgMgr.Get(common.LDAPGroupSearchScope).GetInt(), - LdapGroupAdminDN: cfgMgr.Get(common.LdapGroupAdminDn).GetString(), + LdapGroupAdminDN: cfgMgr.Get(common.LDAPGroupAdminDn).GetString(), LdapGroupMembershipAttribute: cfgMgr.Get(common.LDAPGroupMembershipAttribute).GetString(), }, nil } @@ -280,7 +280,11 @@ func InternalJobServiceURL() string { // InternalCoreURL returns the local harbor core url func InternalCoreURL() string { return strings.TrimSuffix(cfgMgr.Get(common.CoreURL).GetString(), "/") +} +// LocalCoreURL returns the local harbor core url +func LocalCoreURL() string { + return cfgMgr.Get(common.CoreLocalURL).GetString() } // InternalTokenServiceEndpoint returns token service endpoint for internal communication between Harbor containers @@ -327,12 +331,14 @@ func Database() (*models.Database, error) { database := &models.Database{} database.Type = cfgMgr.Get(common.DatabaseType).GetString() postgresql := &models.PostGreSQL{ - Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(), - Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(), - Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(), - Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(), - Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(), - SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(), + Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(), + Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(), + Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(), + Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(), + Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(), + SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(), + MaxIdleConns: 
cfgMgr.Get(common.PostGreSQLMaxIdleConns).GetInt(), + MaxOpenConns: cfgMgr.Get(common.PostGreSQLMaxOpenConns).GetInt(), } database.PostGreSQL = postgresql @@ -482,7 +488,7 @@ func HTTPAuthProxySetting() (*models.HTTPAuthProxy, error) { Endpoint: cfgMgr.Get(common.HTTPAuthProxyEndpoint).GetString(), TokenReviewEndpoint: cfgMgr.Get(common.HTTPAuthProxyTokenReviewEndpoint).GetString(), VerifyCert: cfgMgr.Get(common.HTTPAuthProxyVerifyCert).GetBool(), - AlwaysOnBoard: cfgMgr.Get(common.HTTPAuthProxyAlwaysOnboard).GetBool(), + SkipSearch: cfgMgr.Get(common.HTTPAuthProxySkipSearch).GetBool(), }, nil } @@ -510,3 +516,24 @@ func OIDCSetting() (*models.OIDCSetting, error) { Scope: scope, }, nil } + +// NotificationEnable returns a bool to indicates if notification enabled in harbor +func NotificationEnable() bool { + return cfgMgr.Get(common.NotificationEnable).GetBool() +} + +// QuotaPerProjectEnable returns a bool to indicates if quota per project enabled in harbor +func QuotaPerProjectEnable() bool { + return cfgMgr.Get(common.QuotaPerProjectEnable).GetBool() +} + +// QuotaSetting returns the setting of quota. 
+func QuotaSetting() (*models.QuotaSetting, error) { + if err := cfgMgr.Load(); err != nil { + return nil, err + } + return &models.QuotaSetting{ + CountPerProject: cfgMgr.Get(common.CountPerProject).GetInt64(), + StoragePerProject: cfgMgr.Get(common.StoragePerProject).GetInt64(), + }, nil +} diff --git a/src/core/config/config_test.go b/src/core/config/config_test.go index 89561778d..ae31c04bc 100644 --- a/src/core/config/config_test.go +++ b/src/core/config/config_test.go @@ -21,6 +21,7 @@ import ( "testing" "fmt" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" @@ -207,6 +208,10 @@ func TestConfig(t *testing.T) { assert.Equal("http://myjob:8888", InternalJobServiceURL()) assert.Equal("http://myui:8888/service/token", InternalTokenServiceEndpoint()) + localCoreURL := LocalCoreURL() + assert.Equal("http://127.0.0.1:8080", localCoreURL) + + assert.True(NotificationEnable()) } func currPath() string { @@ -228,17 +233,17 @@ func TestConfigureValue_GetMap(t *testing.T) { func TestHTTPAuthProxySetting(t *testing.T) { m := map[string]interface{}{ - common.HTTPAuthProxyAlwaysOnboard: "true", - common.HTTPAuthProxyVerifyCert: "true", - common.HTTPAuthProxyEndpoint: "https://auth.proxy/suffix", + common.HTTPAuthProxySkipSearch: "true", + common.HTTPAuthProxyVerifyCert: "true", + common.HTTPAuthProxyEndpoint: "https://auth.proxy/suffix", } InitWithSettings(m) v, e := HTTPAuthProxySetting() assert.Nil(t, e) assert.Equal(t, *v, models.HTTPAuthProxy{ - Endpoint: "https://auth.proxy/suffix", - AlwaysOnBoard: true, - VerifyCert: true, + Endpoint: "https://auth.proxy/suffix", + SkipSearch: true, + VerifyCert: true, }) } diff --git a/src/core/controllers/controllers_test.go b/src/core/controllers/controllers_test.go index 1381a26d3..f38517ebc 100644 --- a/src/core/controllers/controllers_test.go +++ b/src/core/controllers/controllers_test.go @@ -32,7 +32,7 @@ import ( 
"github.com/goharbor/harbor/src/common/models" utilstest "github.com/goharbor/harbor/src/common/utils/test" "github.com/goharbor/harbor/src/core/config" - "github.com/goharbor/harbor/src/core/proxy" + "github.com/goharbor/harbor/src/core/middlewares" "github.com/stretchr/testify/assert" ) @@ -102,8 +102,9 @@ func TestRedirectForOIDC(t *testing.T) { // TestMain is a sample to run an endpoint test func TestAll(t *testing.T) { config.InitWithSettings(utilstest.GetUnitTestConfig()) - proxy.Init() assert := assert.New(t) + err := middlewares.Init() + assert.Nil(err) r, _ := http.NewRequest("POST", "/c/login", nil) w := httptest.NewRecorder() diff --git a/src/core/controllers/oidc.go b/src/core/controllers/oidc.go index 1479b8e5a..903b99954 100644 --- a/src/core/controllers/oidc.go +++ b/src/core/controllers/oidc.go @@ -17,6 +17,9 @@ package controllers import ( "encoding/json" "fmt" + "net/http" + "strings" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" @@ -26,8 +29,6 @@ import ( "github.com/goharbor/harbor/src/core/api" "github.com/goharbor/harbor/src/core/config" "github.com/pkg/errors" - "net/http" - "strings" ) const tokenKey = "oidc_token" @@ -189,9 +190,6 @@ func (oc *OIDCController) Onboard() { } email := d.Email - if email == "" { - email = utils.GenerateRandomString() + "@placeholder.com" - } user := models.User{ Username: username, Realname: d.Username, diff --git a/src/core/controllers/proxy.go b/src/core/controllers/proxy.go index 1ddaf9ca7..a8fe916ba 100644 --- a/src/core/controllers/proxy.go +++ b/src/core/controllers/proxy.go @@ -2,7 +2,7 @@ package controllers import ( "github.com/astaxie/beego" - "github.com/goharbor/harbor/src/core/proxy" + "github.com/goharbor/harbor/src/core/middlewares" ) // RegistryProxy is the endpoint on UI for a reverse proxy pointing to registry @@ -14,7 +14,7 @@ type RegistryProxy struct { func (p *RegistryProxy) Handle() { req := 
p.Ctx.Request rw := p.Ctx.ResponseWriter - proxy.Handle(rw, req) + middlewares.Handle(rw, req) } // Render ... diff --git a/src/core/filter/security.go b/src/core/filter/security.go index 54d5d4523..34f7310a5 100644 --- a/src/core/filter/security.go +++ b/src/core/filter/security.go @@ -41,15 +41,7 @@ import ( "github.com/goharbor/harbor/src/core/promgr/pmsdriver/admiral" "strings" - "encoding/json" - k8s_api_v1beta1 "k8s.io/api/authentication/v1beta1" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" + "github.com/goharbor/harbor/src/pkg/authproxy" ) // ContextValueKey for content value @@ -229,8 +221,10 @@ type oidcCliReqCtxModifier struct{} func (oc *oidcCliReqCtxModifier) Modify(ctx *beegoctx.Context) bool { path := ctx.Request.URL.Path - if path != "/service/token" && !strings.HasPrefix(path, "/chartrepo/") { - log.Debug("OIDC CLI modifer only handles request by docker CLI or helm CLI") + if path != "/service/token" && + !strings.HasPrefix(path, "/chartrepo/") && + !strings.HasPrefix(path, "/api/chartrepo/") { + log.Debug("OIDC CLI modifier only handles request by docker CLI or helm CLI") return false } if ctx.Request.Context().Value(AuthModeKey).(string) != common.OIDCAuth { @@ -319,60 +313,17 @@ func (ap *authProxyReqCtxModifier) Modify(ctx *beegoctx.Context) bool { log.Errorf("User name %s doesn't meet the auth proxy name pattern", proxyUserName) return false } - httpAuthProxyConf, err := config.HTTPAuthProxySetting() if err != nil { log.Errorf("fail to get auth proxy settings, %v", err) return false } - - // Init auth client with the auth proxy endpoint. 
- authClientCfg := &rest.Config{ - Host: httpAuthProxyConf.TokenReviewEndpoint, - ContentConfig: rest.ContentConfig{ - GroupVersion: &schema.GroupVersion{}, - NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, - }, - BearerToken: proxyPwd, - TLSClientConfig: rest.TLSClientConfig{ - Insecure: !httpAuthProxyConf.VerifyCert, - }, - } - authClient, err := rest.RESTClientFor(authClientCfg) + tokenReviewResponse, err := authproxy.TokenReview(proxyPwd, httpAuthProxyConf) if err != nil { - log.Errorf("fail to create auth client, %v", err) + log.Errorf("fail to review token, %v", err) return false } - // Do auth with the token. - tokenReviewRequest := &k8s_api_v1beta1.TokenReview{ - TypeMeta: metav1.TypeMeta{ - Kind: "TokenReview", - APIVersion: "authentication.k8s.io/v1beta1", - }, - Spec: k8s_api_v1beta1.TokenReviewSpec{ - Token: proxyPwd, - }, - } - res := authClient.Post().Body(tokenReviewRequest).Do() - err = res.Error() - if err != nil { - log.Errorf("fail to POST auth request, %v", err) - return false - } - resRaw, err := res.Raw() - if err != nil { - log.Errorf("fail to get raw data of token review, %v", err) - return false - } - - // Parse the auth response, check the user name and authenticated status. 
- tokenReviewResponse := &k8s_api_v1beta1.TokenReview{} - err = json.Unmarshal(resRaw, &tokenReviewResponse) - if err != nil { - log.Errorf("fail to decode token review, %v", err) - return false - } if !tokenReviewResponse.Status.Authenticated { log.Errorf("fail to auth user: %s", rawUserName) return false diff --git a/src/core/filter/security_test.go b/src/core/filter/security_test.go index 17307efab..a74d2fa12 100644 --- a/src/core/filter/security_test.go +++ b/src/core/filter/security_test.go @@ -16,8 +16,6 @@ package filter import ( "context" - "github.com/goharbor/harbor/src/common/utils/oidc" - "github.com/stretchr/testify/require" "log" "net/http" "net/http/httptest" @@ -27,6 +25,9 @@ import ( "testing" "time" + "github.com/goharbor/harbor/src/common/utils/oidc" + "github.com/stretchr/testify/require" + "github.com/astaxie/beego" beegoctx "github.com/astaxie/beego/context" "github.com/astaxie/beego/session" @@ -241,7 +242,7 @@ func TestAuthProxyReqCtxModifier(t *testing.T) { defer server.Close() c := map[string]interface{}{ - common.HTTPAuthProxyAlwaysOnboard: "true", + common.HTTPAuthProxySkipSearch: "true", common.HTTPAuthProxyVerifyCert: "false", common.HTTPAuthProxyEndpoint: "https://auth.proxy/suffix", common.HTTPAuthProxyTokenReviewEndpoint: server.URL, @@ -253,7 +254,7 @@ func TestAuthProxyReqCtxModifier(t *testing.T) { assert.Nil(t, e) assert.Equal(t, *v, models.HTTPAuthProxy{ Endpoint: "https://auth.proxy/suffix", - AlwaysOnBoard: true, + SkipSearch: true, VerifyCert: false, TokenReviewEndpoint: server.URL, }) diff --git a/src/core/main.go b/src/core/main.go old mode 100644 new mode 100755 index 2c68141dd..412ab25d5 --- a/src/core/main.go +++ b/src/core/main.go @@ -17,15 +17,12 @@ package main import ( "encoding/gob" "fmt" - "os" - "os/signal" - "strconv" - "syscall" - "github.com/astaxie/beego" _ "github.com/astaxie/beego/session/redis" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/job" 
"github.com/goharbor/harbor/src/common/models" + common_quota "github.com/goharbor/harbor/src/common/quota" "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/api" @@ -33,10 +30,23 @@ import ( _ "github.com/goharbor/harbor/src/core/auth/db" _ "github.com/goharbor/harbor/src/core/auth/ldap" _ "github.com/goharbor/harbor/src/core/auth/uaa" + "os" + "os/signal" + "strconv" + "syscall" + + quota "github.com/goharbor/harbor/src/core/api/quota" + _ "github.com/goharbor/harbor/src/core/api/quota/chart" + _ "github.com/goharbor/harbor/src/core/api/quota/registry" + "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/filter" - "github.com/goharbor/harbor/src/core/proxy" + "github.com/goharbor/harbor/src/core/middlewares" + _ "github.com/goharbor/harbor/src/core/notifier/topic" "github.com/goharbor/harbor/src/core/service/token" + "github.com/goharbor/harbor/src/pkg/notification" + "github.com/goharbor/harbor/src/pkg/scheduler" + "github.com/goharbor/harbor/src/pkg/types" "github.com/goharbor/harbor/src/replication" ) @@ -70,6 +80,64 @@ func updateInitPassword(userID int, password string) error { return nil } +// Quota migration +func quotaSync() error { + usages, err := dao.ListQuotaUsages() + if err != nil { + log.Errorf("list quota usage error, %v", err) + return err + } + projects, err := dao.GetProjects(nil) + if err != nil { + log.Errorf("list project error, %v", err) + return err + } + + // The condition handles these two cases: + // 1, len(project) > 1 && len(usages) == 1. existing projects without usage, as we do always has 'library' usage in DB. + // 2, migration fails at the phase of inserting usage into DB, and parts of them are inserted successfully. 
+ if len(projects) != len(usages) { + log.Info("Start to sync quota data .....") + if err := quota.Sync(config.GlobalProjectMgr, true); err != nil { + log.Errorf("Fail to sync quota data, %v", err) + return err + } + log.Info("Success to sync quota data .....") + return nil + } + + // Only has one project without usage + zero := common_quota.ResourceList{ + common_quota.ResourceCount: 0, + common_quota.ResourceStorage: 0, + } + if len(projects) == 1 && len(usages) == 1 { + totalRepo, err := dao.GetTotalOfRepositories() + if totalRepo == 0 { + return nil + } + refID, err := strconv.ParseInt(usages[0].ReferenceID, 10, 64) + if err != nil { + log.Error(err) + return err + } + usedRes, err := types.NewResourceList(usages[0].Used) + if err != nil { + log.Error(err) + return err + } + if types.Equals(usedRes, zero) && refID == projects[0].ProjectID { + log.Info("Start to sync quota data .....") + if err := quota.Sync(config.GlobalProjectMgr, true); err != nil { + log.Errorf("Fail to sync quota data, %v", err) + return err + } + log.Info("Success to sync quota data .....") + } + } + return nil +} + func gracefulShutdown(closing chan struct{}) { signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) @@ -106,6 +174,11 @@ func main() { log.Fatalf("failed to load config: %v", err) } + // init the jobservice client + job.Init() + // init the scheduler + scheduler.Init() + password, err := config.InitialAdminPassword() if err != nil { log.Fatalf("failed to get admin's initia password: %v", err) @@ -135,6 +208,9 @@ func main() { log.Fatalf("failed to init for replication: %v", err) } + log.Info("initializing notification...") + notification.Init() + filter.Init() beego.InsertFilter("/*", beego.BeforeRouter, filter.SecurityFilter) beego.InsertFilter("/*", beego.BeforeRouter, filter.ReadonlyFilter) @@ -158,7 +234,13 @@ func main() { } log.Info("Init proxy") - proxy.Init() - // go proxy.StartProxy() + if err := 
middlewares.Init(); err != nil { + log.Fatalf("init proxy error, %v", err) + } + + if err := quotaSync(); err != nil { + log.Fatalf("quota migration error, %v", err) + } + beego.Run() } diff --git a/src/core/middlewares/chain.go b/src/core/middlewares/chain.go new file mode 100644 index 000000000..822dc0c63 --- /dev/null +++ b/src/core/middlewares/chain.go @@ -0,0 +1,75 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middlewares + +import ( + "net/http" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/chart" + "github.com/goharbor/harbor/src/core/middlewares/contenttrust" + "github.com/goharbor/harbor/src/core/middlewares/countquota" + "github.com/goharbor/harbor/src/core/middlewares/listrepo" + "github.com/goharbor/harbor/src/core/middlewares/multiplmanifest" + "github.com/goharbor/harbor/src/core/middlewares/readonly" + "github.com/goharbor/harbor/src/core/middlewares/sizequota" + "github.com/goharbor/harbor/src/core/middlewares/url" + "github.com/goharbor/harbor/src/core/middlewares/vulnerable" + "github.com/justinas/alice" +) + +// DefaultCreator ... +type DefaultCreator struct { + middlewares []string +} + +// New ... +func New(middlewares []string) *DefaultCreator { + return &DefaultCreator{ + middlewares: middlewares, + } +} + +// Create creates a middleware chain ... 
+func (b *DefaultCreator) Create() *alice.Chain { + chain := alice.New() + for _, mName := range b.middlewares { + middlewareName := mName + chain = chain.Append(func(next http.Handler) http.Handler { + constructor := b.geMiddleware(middlewareName) + if constructor == nil { + log.Errorf("cannot init middle %s", middlewareName) + return nil + } + return constructor(next) + }) + } + return &chain +} + +func (b *DefaultCreator) geMiddleware(mName string) alice.Constructor { + middlewares := map[string]alice.Constructor{ + CHART: func(next http.Handler) http.Handler { return chart.New(next) }, + READONLY: func(next http.Handler) http.Handler { return readonly.New(next) }, + URL: func(next http.Handler) http.Handler { return url.New(next) }, + MUITIPLEMANIFEST: func(next http.Handler) http.Handler { return multiplmanifest.New(next) }, + LISTREPO: func(next http.Handler) http.Handler { return listrepo.New(next) }, + CONTENTTRUST: func(next http.Handler) http.Handler { return contenttrust.New(next) }, + VULNERABLE: func(next http.Handler) http.Handler { return vulnerable.New(next) }, + SIZEQUOTA: func(next http.Handler) http.Handler { return sizequota.New(next) }, + COUNTQUOTA: func(next http.Handler) http.Handler { return countquota.New(next) }, + } + return middlewares[mName] +} diff --git a/src/core/middlewares/chart/builder.go b/src/core/middlewares/chart/builder.go new file mode 100644 index 000000000..ba54cd2de --- /dev/null +++ b/src/core/middlewares/chart/builder.go @@ -0,0 +1,134 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chart + +import ( + "fmt" + "net/http" + "regexp" + "strconv" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" +) + +var ( + deleteChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P\w+)/charts/(?P\w+)/(?P[\w\d\.]+)/?$`) + createChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P\w+)/charts/?$`) +) + +var ( + defaultBuilders = []interceptor.Builder{ + &chartVersionDeletionBuilder{}, + &chartVersionCreationBuilder{}, + } +) + +type chartVersionDeletionBuilder struct{} + +func (*chartVersionDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if req.Method != http.MethodDelete { + return nil, nil + } + + matches := deleteChartVersionRe.FindStringSubmatch(req.URL.String()) + if len(matches) <= 1 { + return nil, nil + } + + namespace, chartName, version := matches[1], matches[2], matches[3] + + project, err := dao.GetProjectByName(namespace) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", namespace) + } + + info := &util.ChartVersionInfo{ + ProjectID: project.ProjectID, + Namespace: namespace, + ChartName: chartName, + Version: version, + } + + opts := []quota.Option{ + 
quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)), + quota.WithAction(quota.SubtractAction), + quota.StatusCode(http.StatusOK), + quota.MutexKeys(info.MutexKey()), + quota.Resources(types.ResourceList{types.ResourceCount: 1}), + } + + return quota.New(opts...), nil +} + +type chartVersionCreationBuilder struct{} + +func (*chartVersionCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if req.Method != http.MethodPost { + return nil, nil + } + + matches := createChartVersionRe.FindStringSubmatch(req.URL.String()) + if len(matches) <= 1 { + return nil, nil + } + + namespace := matches[1] + + project, err := dao.GetProjectByName(namespace) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", namespace) + } + + info, ok := util.ChartVersionInfoFromContext(req.Context()) + if !ok { + chart, err := parseChart(req) + if err != nil { + return nil, fmt.Errorf("failed to parse chart from body, error: %v", err) + } + chartName, version := chart.Metadata.Name, chart.Metadata.Version + + info = &util.ChartVersionInfo{ + ProjectID: project.ProjectID, + Namespace: namespace, + ChartName: chartName, + Version: version, + } + // Chart version info will be used by computeQuotaForUpload + *req = *req.WithContext(util.NewChartVersionInfoContext(req.Context(), info)) + } + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)), + quota.WithAction(quota.AddAction), + quota.StatusCode(http.StatusCreated), + quota.MutexKeys(info.MutexKey()), + quota.OnResources(computeResourcesForChartVersionCreation), + } + + return quota.New(opts...), nil +} diff --git a/src/core/middlewares/chart/handler.go b/src/core/middlewares/chart/handler.go new file mode 100644 index 
000000000..dd1fa583b --- /dev/null +++ b/src/core/middlewares/chart/handler.go @@ -0,0 +1,83 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chart + +import ( + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/util" +) + +type chartHandler struct { + builders []interceptor.Builder + next http.Handler +} + +// New ... +func New(next http.Handler, builders ...interceptor.Builder) http.Handler { + if len(builders) == 0 { + builders = defaultBuilders + } + + return &chartHandler{ + builders: builders, + next: next, + } +} + +// ServeHTTP manifest ... 
+func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + interceptor, err := h.getInterceptor(req) + if err != nil { + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in chart count quota handler: %v", err)), + http.StatusInternalServerError) + return + } + + if interceptor == nil { + h.next.ServeHTTP(rw, req) + return + } + + if err := interceptor.HandleRequest(req); err != nil { + log.Warningf("Error occurred when to handle request in count quota handler: %v", err) + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in chart count quota handler: %v", err)), + http.StatusInternalServerError) + return + } + + w := util.NewCustomResponseWriter(rw) + h.next.ServeHTTP(w, req) + + interceptor.HandleResponse(w, req) +} + +func (h *chartHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) { + for _, builder := range h.builders { + interceptor, err := builder.Build(req) + if err != nil { + return nil, err + } + + if interceptor != nil { + return interceptor, nil + } + } + + return nil, nil +} diff --git a/src/core/middlewares/chart/handler_test.go b/src/core/middlewares/chart/handler_test.go new file mode 100644 index 000000000..aedf1218e --- /dev/null +++ b/src/core/middlewares/chart/handler_test.go @@ -0,0 +1,137 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package chart + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/goharbor/harbor/src/chartserver" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" + htesting "github.com/goharbor/harbor/src/testing" + "github.com/stretchr/testify/suite" +) + +func deleteChartVersion(projectName, chartName, version string) { + url := fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", projectName, chartName, version) + req, _ := http.NewRequest(http.MethodDelete, url, nil) + + next := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + rr := httptest.NewRecorder() + h := New(next) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req) +} + +func uploadChartVersion(projectID int64, projectName, chartName, version string) { + url := fmt.Sprintf("/api/chartrepo/%s/charts/", projectName) + req, _ := http.NewRequest(http.MethodPost, url, nil) + + info := &util.ChartVersionInfo{ + ProjectID: projectID, + Namespace: projectName, + ChartName: chartName, + Version: version, + } + *req = *req.WithContext(util.NewChartVersionInfoContext(req.Context(), info)) + + next := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + }) + + rr := httptest.NewRecorder() + h := New(next) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req) +} + +func mockChartController() (*httptest.Server, *chartserver.Controller, error) { + mockServer := httptest.NewServer(htesting.MockChartRepoHandler) + + var oldController, newController *chartserver.Controller + url, err := url.Parse(mockServer.URL) + if err == nil { + newController, err = chartserver.NewController(url) + } + + if err != nil { + mockServer.Close() + return nil, nil, err + } + + chartController() // Init chart controller + + // Override current controller and keep the old one for restoring + oldController = 
controller + controller = newController + + return mockServer, oldController, nil +} + +type HandlerSuite struct { + htesting.Suite + oldController *chartserver.Controller + mockChartServer *httptest.Server +} + +func (suite *HandlerSuite) SetupTest() { + mockServer, oldController, err := mockChartController() + suite.Nil(err, "Mock chart controller failed") + + suite.oldController = oldController + suite.mockChartServer = mockServer +} + +func (suite *HandlerSuite) TearDownTest() { + for _, table := range []string{ + "quota", "quota_usage", + } { + dao.ClearTable(table) + } + + controller = suite.oldController + suite.mockChartServer.Close() +} + +func (suite *HandlerSuite) TestUpload() { + suite.WithProject(func(projectID int64, projectName string) { + uploadChartVersion(projectID, projectName, "harbor", "0.2.1") + suite.AssertResourceUsage(1, types.ResourceCount, projectID) + + // harbor:0.2.0 exists in repo1, upload it again + uploadChartVersion(projectID, projectName, "harbor", "0.2.0") + suite.AssertResourceUsage(1, types.ResourceCount, projectID) + }, "repo1") +} + +func (suite *HandlerSuite) TestDelete() { + suite.WithProject(func(projectID int64, projectName string) { + uploadChartVersion(projectID, projectName, "harbor", "0.2.1") + suite.AssertResourceUsage(1, types.ResourceCount, projectID) + + deleteChartVersion(projectName, "harbor", "0.2.1") + suite.AssertResourceUsage(0, types.ResourceCount, projectID) + }, "repo1") +} + +func TestRunHandlerSuite(t *testing.T) { + suite.Run(t, new(HandlerSuite)) +} diff --git a/src/core/middlewares/chart/util.go b/src/core/middlewares/chart/util.go new file mode 100644 index 000000000..03b899498 --- /dev/null +++ b/src/core/middlewares/chart/util.go @@ -0,0 +1,116 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chart + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + + "github.com/goharbor/harbor/src/chartserver" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/proto/hapi/chart" +) + +const ( + formFieldNameForChart = "chart" +) + +var ( + controller *chartserver.Controller + controllerErr error + controllerOnce sync.Once +) + +func chartController() (*chartserver.Controller, error) { + controllerOnce.Do(func() { + addr, err := config.GetChartMuseumEndpoint() + if err != nil { + controllerErr = fmt.Errorf("failed to get the endpoint URL of chart storage server: %s", err.Error()) + return + } + + addr = strings.TrimSuffix(addr, "/") + url, err := url.Parse(addr) + if err != nil { + controllerErr = errors.New("endpoint URL of chart storage server is malformed") + return + } + + ctr, err := chartserver.NewController(url) + if err != nil { + controllerErr = errors.New("failed to initialize chart API controller") + } + + controller = ctr + + log.Debugf("Chart storage server is set to %s", url.String()) + log.Info("API controller for chart repository server is successfully initialized") + }) + + return controller, controllerErr +} + +func chartVersionExists(namespace, chartName, version string) bool { + ctr, err := chartController() + if err != nil { + return false + } + + chartVersion, err := ctr.GetChartVersion(namespace, 
chartName, version) + if err != nil { + log.Debugf("Get chart %s of version %s in namespace %s failed, error: %v", chartName, version, namespace, err) + return false + } + + return !chartVersion.Removed +} + +// computeResourcesForChartVersionCreation returns count resource required for the chart package +// no count required if the chart package of version exists in project +func computeResourcesForChartVersionCreation(req *http.Request) (types.ResourceList, error) { + info, ok := util.ChartVersionInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("chart version info missing") + } + + if chartVersionExists(info.Namespace, info.ChartName, info.Version) { + log.Debugf("Chart %s with version %s in namespace %s exists", info.ChartName, info.Version, info.Namespace) + return nil, nil + } + + return types.ResourceList{types.ResourceCount: 1}, nil +} + +func parseChart(req *http.Request) (*chart.Chart, error) { + chartFile, _, err := req.FormFile(formFieldNameForChart) + if err != nil { + return nil, err + } + + chart, err := chartutil.LoadArchive(chartFile) + if err != nil { + return nil, fmt.Errorf("load chart from archive failed: %s", err.Error()) + } + + return chart, nil +} diff --git a/src/core/middlewares/config.go b/src/core/middlewares/config.go new file mode 100644 index 000000000..8f0dcb3c0 --- /dev/null +++ b/src/core/middlewares/config.go @@ -0,0 +1,37 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package middlewares + +// const variables +const ( + CHART = "chart" + READONLY = "readonly" + URL = "url" + MUITIPLEMANIFEST = "manifest" + LISTREPO = "listrepo" + CONTENTTRUST = "contenttrust" + VULNERABLE = "vulnerable" + SIZEQUOTA = "sizequota" + COUNTQUOTA = "countquota" +) + +// ChartMiddlewares middlewares for chart server +var ChartMiddlewares = []string{CHART} + +// Middlewares with sequential organization +var Middlewares = []string{READONLY, URL, MUITIPLEMANIFEST, LISTREPO, CONTENTTRUST, VULNERABLE, SIZEQUOTA, COUNTQUOTA} + +// MiddlewaresLocal ... +var MiddlewaresLocal = []string{SIZEQUOTA, COUNTQUOTA} diff --git a/src/core/middlewares/contenttrust/handler.go b/src/core/middlewares/contenttrust/handler.go new file mode 100644 index 000000000..bcc4de44a --- /dev/null +++ b/src/core/middlewares/contenttrust/handler.go @@ -0,0 +1,101 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package contenttrust + +import ( + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/common/utils/notary" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/util" + "net/http" +) + +// NotaryEndpoint ... 
+var NotaryEndpoint = "" + +type contentTrustHandler struct { + next http.Handler +} + +// New ... +func New(next http.Handler) http.Handler { + return &contentTrustHandler{ + next: next, + } +} + +// ServeHTTP ... +func (cth contentTrustHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + imgRaw := req.Context().Value(util.ImageInfoCtxKey) + if imgRaw == nil || !config.WithNotary() { + cth.next.ServeHTTP(rw, req) + return + } + img, _ := req.Context().Value(util.ImageInfoCtxKey).(util.ImageInfo) + if img.Digest == "" { + cth.next.ServeHTTP(rw, req) + return + } + if !util.GetPolicyChecker().ContentTrustEnabled(img.ProjectName) { + cth.next.ServeHTTP(rw, req) + return + } + match, err := matchNotaryDigest(img) + if err != nil { + http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", "Failed in communication with Notary please check the log"), http.StatusInternalServerError) + return + } + if !match { + log.Debugf("digest mismatch, failing the response.") + http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", "The image is not signed in Notary."), http.StatusPreconditionFailed) + return + } + cth.next.ServeHTTP(rw, req) +} + +func matchNotaryDigest(img util.ImageInfo) (bool, error) { + if NotaryEndpoint == "" { + NotaryEndpoint = config.InternalNotaryEndpoint() + } + targets, err := notary.GetInternalTargets(NotaryEndpoint, util.TokenUsername, img.Repository) + if err != nil { + return false, err + } + for _, t := range targets { + if utils.IsDigest(img.Reference) { + d, err := notary.DigestFromTarget(t) + if err != nil { + return false, err + } + if img.Digest == d { + return true, nil + } + } else { + if t.Tag == img.Reference { + log.Debugf("found reference: %s in notary, try to match digest.", img.Reference) + d, err := notary.DigestFromTarget(t) + if err != nil { + return false, err + } + if img.Digest == d { + return true, nil + } + } + } + } + log.Debugf("image: %#v, not found in notary", img) + return false, nil +} diff --git 
a/src/core/middlewares/contenttrust/handler_test.go b/src/core/middlewares/contenttrust/handler_test.go new file mode 100644 index 000000000..d7767cac1 --- /dev/null +++ b/src/core/middlewares/contenttrust/handler_test.go @@ -0,0 +1,63 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package contenttrust + +import ( + "github.com/goharbor/harbor/src/common" + notarytest "github.com/goharbor/harbor/src/common/utils/notary/test" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/stretchr/testify/assert" + "net/http/httptest" + "os" + "testing" +) + +var endpoint = "10.117.4.142" +var notaryServer *httptest.Server + +var admiralEndpoint = "http://127.0.0.1:8282" +var token = "" + +func TestMain(m *testing.M) { + notaryServer = notarytest.NewNotaryServer(endpoint) + defer notaryServer.Close() + NotaryEndpoint = notaryServer.URL + var defaultConfig = map[string]interface{}{ + common.ExtEndpoint: "https://" + endpoint, + common.WithNotary: true, + common.TokenExpiration: 30, + } + config.InitWithSettings(defaultConfig) + result := m.Run() + if result != 0 { + os.Exit(result) + } +} + +func TestMatchNotaryDigest(t *testing.T) { + assert := assert.New(t) + // The data from common/utils/notary/helper_test.go + img1 := util.ImageInfo{Repository: "notary-demo/busybox", Reference: "1.0", ProjectName: "notary-demo", Digest: 
"sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7"} + img2 := util.ImageInfo{Repository: "notary-demo/busybox", Reference: "2.0", ProjectName: "notary-demo", Digest: "sha256:12345678"} + + res1, err := matchNotaryDigest(img1) + assert.Nil(err, "Unexpected error: %v, image: %#v", err, img1) + assert.True(res1) + + res2, err := matchNotaryDigest(img2) + assert.Nil(err, "Unexpected error: %v, image: %#v, take 2", err, img2) + assert.False(res2) +} diff --git a/src/core/middlewares/countquota/builder.go b/src/core/middlewares/countquota/builder.go new file mode 100644 index 000000000..089c4a5d6 --- /dev/null +++ b/src/core/middlewares/countquota/builder.go @@ -0,0 +1,100 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package countquota + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota" + "github.com/goharbor/harbor/src/core/middlewares/util" +) + +var ( + defaultBuilders = []interceptor.Builder{ + &manifestDeletionBuilder{}, + &manifestCreationBuilder{}, + } +) + +type manifestDeletionBuilder struct{} + +func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchDeleteManifest(req); !match { + return nil, nil + } + + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + var err error + info, err = util.ParseManifestInfoFromPath(req) + if err != nil { + return nil, fmt.Errorf("failed to parse manifest, error %v", err) + } + + // Manifest info will be used by computeResourcesForDeleteManifest + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) + } + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + quota.WithAction(quota.SubtractAction), + quota.StatusCode(http.StatusAccepted), + quota.MutexKeys(info.MutexKey("count")), + quota.OnResources(computeResourcesForManifestDeletion), + quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { + return dao.DeleteArtifactByDigest(info.ProjectID, info.Repository, info.Digest) + }), + } + + return quota.New(opts...), nil +} + +type manifestCreationBuilder struct{} + +func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchPushManifest(req); !match { + return nil, nil + } + + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + var err error + info, err = util.ParseManifestInfo(req) + if err != nil { + return nil, 
fmt.Errorf("failed to parse manifest, error %v", err) + } + + // Manifest info will be used by computeResourcesForCreateManifest + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) + } + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + quota.WithAction(quota.AddAction), + quota.StatusCode(http.StatusCreated), + quota.MutexKeys(info.MutexKey("count")), + quota.OnResources(computeResourcesForManifestCreation), + quota.OnFulfilled(afterManifestCreated), + } + + return quota.New(opts...), nil +} diff --git a/src/core/middlewares/countquota/handler.go b/src/core/middlewares/countquota/handler.go new file mode 100644 index 000000000..1b05a4cf5 --- /dev/null +++ b/src/core/middlewares/countquota/handler.go @@ -0,0 +1,83 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package countquota + +import ( + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/util" +) + +type countQuotaHandler struct { + builders []interceptor.Builder + next http.Handler +} + +// New ... 
+func New(next http.Handler, builders ...interceptor.Builder) http.Handler { + if len(builders) == 0 { + builders = defaultBuilders + } + + return &countQuotaHandler{ + builders: builders, + next: next, + } +} + +// ServeHTTP manifest ... +func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + interceptor, err := h.getInterceptor(req) + if err != nil { + log.Warningf("Error occurred when to handle request in count quota handler: %v", err) + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in count quota handler: %v", err)), + http.StatusInternalServerError) + return + } + + if interceptor == nil { + h.next.ServeHTTP(rw, req) + return + } + + if err := interceptor.HandleRequest(req); err != nil { + log.Warningf("Error occurred when to handle request in count quota handler: %v", err) + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in count quota handler: %v", err)), + http.StatusInternalServerError) + return + } + + h.next.ServeHTTP(rw, req) + + interceptor.HandleResponse(rw, req) +} + +func (h *countQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) { + for _, builder := range h.builders { + interceptor, err := builder.Build(req) + if err != nil { + return nil, err + } + + if interceptor != nil { + return interceptor, nil + } + } + + return nil, nil +} diff --git a/src/core/middlewares/countquota/handler_test.go b/src/core/middlewares/countquota/handler_test.go new file mode 100644 index 000000000..a2ebb5a69 --- /dev/null +++ b/src/core/middlewares/countquota/handler_test.go @@ -0,0 +1,304 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package countquota + +import ( + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/suite" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func getProjectCountUsage(projectID int64) (int64, error) { + usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)} + err := dao.GetOrmer().Read(&usage, "reference", "reference_id") + if err != nil { + return 0, err + } + used, err := types.NewResourceList(usage.Used) + if err != nil { + return 0, err + } + + return used[types.ResourceCount], nil +} + +func randomString(n int) string { + const letterBytes = "abcdefghijklmnopqrstuvwxyz" + + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + + return string(b) +} + +func doDeleteManifestRequest(projectID int64, projectName, name, dgt string, next ...http.HandlerFunc) int { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/manifests/%s", repository, dgt) + req, _ := http.NewRequest("DELETE", url, nil) + + ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{ + ProjectID: projectID, + Repository: repository, + Digest: dgt, + }) + + rr := 
httptest.NewRecorder() + + var n http.HandlerFunc + if len(next) > 0 { + n = next[0] + } else { + n = func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusAccepted) + } + } + + h := New(http.HandlerFunc(n)) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req.WithContext(ctx)) + + return rr.Code +} + +func doPutManifestRequest(projectID int64, projectName, name, tag, dgt string, next ...http.HandlerFunc) int { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag) + req, _ := http.NewRequest("PUT", url, nil) + + ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{ + ProjectID: projectID, + Repository: repository, + Tag: tag, + Digest: dgt, + References: []distribution.Descriptor{ + {Digest: digest.FromString(randomString(15))}, + {Digest: digest.FromString(randomString(15))}, + }, + }) + + rr := httptest.NewRecorder() + + var n http.HandlerFunc + if len(next) > 0 { + n = next[0] + } else { + n = func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + } + } + + h := New(http.HandlerFunc(n)) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req.WithContext(ctx)) + + return rr.Code +} + +type HandlerSuite struct { + suite.Suite +} + +func (suite *HandlerSuite) addProject(projectName string) int64 { + projectID, err := dao.AddProject(models.Project{ + Name: projectName, + OwnerID: 1, + }) + + suite.Nil(err, fmt.Sprintf("Add project failed for %s", projectName)) + + return projectID +} + +func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) { + count, err := getProjectCountUsage(projectID) + suite.Nil(err, fmt.Sprintf("Failed to get count usage of project %d, error: %v", projectID, err)) + suite.Equal(expected, count, "Failed to check count usage for project %d", projectID) +} + +func (suite *HandlerSuite) TearDownTest() { + for _, table := range []string{ + "artifact", "blob", + "artifact_blob", "project_blob", + "quota", 
"quota_usage", + } { + dao.ClearTable(table) + } +} + +func (suite *HandlerSuite) TestPutManifestCreated() { + projectName := randomString(5) + + projectID := suite.addProject(projectName) + defer func() { + dao.DeleteProject(projectID) + }() + + dgt := digest.FromString(randomString(15)).String() + code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt) + suite.Equal(http.StatusCreated, code) + suite.checkCountUsage(1, projectID) + + total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt}) + suite.Nil(err) + suite.Equal(int64(1), total, "Artifact should be created") + + // Push the photon:latest with photon:dev + code = doPutManifestRequest(projectID, projectName, "photon", "dev", dgt) + suite.Equal(http.StatusCreated, code) + suite.checkCountUsage(2, projectID) + + total, err = dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt}) + suite.Nil(err) + suite.Equal(int64(2), total, "Artifact should be created") + + // Push the photon:latest with new image + newDgt := digest.FromString(randomString(15)).String() + code = doPutManifestRequest(projectID, projectName, "photon", "latest", newDgt) + suite.Equal(http.StatusCreated, code) + suite.checkCountUsage(2, projectID) + + total, err = dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: newDgt}) + suite.Nil(err) + suite.Equal(int64(1), total, "Artifact should be updated") +} + +func (suite *HandlerSuite) TestPutManifestFailed() { + projectName := randomString(5) + + projectID := suite.addProject(projectName) + defer func() { + dao.DeleteProject(projectID) + }() + + next := func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + } + + dgt := digest.FromString(randomString(15)).String() + code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt, next) + suite.Equal(http.StatusInternalServerError, code) + suite.checkCountUsage(0, projectID) + + total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: 
dgt}) + suite.Nil(err) + suite.Equal(int64(0), total, "Artifact should not be created") +} + +func (suite *HandlerSuite) TestDeleteManifestAccepted() { + projectName := randomString(5) + + projectID := suite.addProject(projectName) + defer func() { + dao.DeleteProject(projectID) + }() + + dgt := digest.FromString(randomString(15)).String() + code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt) + suite.Equal(http.StatusCreated, code) + suite.checkCountUsage(1, projectID) + + code = doDeleteManifestRequest(projectID, projectName, "photon", dgt) + suite.Equal(http.StatusAccepted, code) + suite.checkCountUsage(0, projectID) +} + +func (suite *HandlerSuite) TestDeleteManifestFailed() { + projectName := randomString(5) + + projectID := suite.addProject(projectName) + defer func() { + dao.DeleteProject(projectID) + }() + + dgt := digest.FromString(randomString(15)).String() + code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt) + suite.Equal(http.StatusCreated, code) + suite.checkCountUsage(1, projectID) + + next := func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + } + + code = doDeleteManifestRequest(projectID, projectName, "photon", dgt, next) + suite.Equal(http.StatusInternalServerError, code) + suite.checkCountUsage(1, projectID) +} + +func (suite *HandlerSuite) TestDeleteManifestInMultiProjects() { + projectName := randomString(5) + + projectID := suite.addProject(projectName) + defer func() { + dao.DeleteProject(projectID) + }() + + dgt := digest.FromString(randomString(15)).String() + code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt) + suite.Equal(http.StatusCreated, code) + suite.checkCountUsage(1, projectID) + + { + projectName := randomString(5) + + projectID := suite.addProject(projectName) + defer func() { + dao.DeleteProject(projectID) + }() + + code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt) + 
suite.Equal(http.StatusCreated, code) + suite.checkCountUsage(1, projectID) + + code = doDeleteManifestRequest(projectID, projectName, "photon", dgt) + suite.Equal(http.StatusAccepted, code) + suite.checkCountUsage(0, projectID) + } + + code = doDeleteManifestRequest(projectID, projectName, "photon", dgt) + suite.Equal(http.StatusAccepted, code) + suite.checkCountUsage(0, projectID) +} + +func TestMain(m *testing.M) { + config.Init() + dao.PrepareTestForPostgresSQL() + + if result := m.Run(); result != 0 { + os.Exit(result) + } +} + +func TestRunHandlerSuite(t *testing.T) { + suite.Run(t, new(HandlerSuite)) +} diff --git a/src/core/middlewares/countquota/util.go b/src/core/middlewares/countquota/util.go new file mode 100644 index 000000000..8275cb7ae --- /dev/null +++ b/src/core/middlewares/countquota/util.go @@ -0,0 +1,118 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package countquota + +import ( + "errors" + "fmt" + "net/http" + "strings" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/quota" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" +) + +// computeResourcesForManifestCreation returns count resource required for manifest +// no count required if the tag of the repository exists in the project +func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("manifest info missing") + } + + // only count quota required when push new tag + if info.IsNewTag() { + return quota.ResourceList{quota.ResourceCount: 1}, nil + } + + return nil, nil +} + +// computeResourcesForManifestDeletion returns count resource will be released when manifest deleted +// then result will be the sum of manifest count of the same repository in the project +func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("manifest info missing") + } + + total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{ + PID: info.ProjectID, + Repo: info.Repository, + Digest: info.Digest, + }) + + if err != nil { + return nil, fmt.Errorf("error occurred when get artifacts %v ", err) + } + + return types.ResourceList{types.ResourceCount: total}, nil +} + +// afterManifestCreated the handler after manifest created success +// it will create or update the artifact info in db, and then attach blobs to artifact +func afterManifestCreated(w http.ResponseWriter, req *http.Request) error { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return errors.New("manifest info missing") + } + + artifact := 
info.Artifact() + if artifact.ID == 0 { + if _, err := dao.AddArtifact(artifact); err != nil { + return fmt.Errorf("error to add artifact, %v", err) + } + } else { + if err := dao.UpdateArtifact(artifact); err != nil { + return fmt.Errorf("error to update artifact, %v", err) + } + } + + return attachBlobsToArtifact(info) +} + +// attachBlobsToArtifact attach the blobs which from manifest to artifact +func attachBlobsToArtifact(info *util.ManifestInfo) error { + self := &models.ArtifactAndBlob{ + DigestAF: info.Digest, + DigestBlob: info.Digest, + } + + artifactBlobs := append([]*models.ArtifactAndBlob{}, self) + + for _, reference := range info.References { + artifactBlob := &models.ArtifactAndBlob{ + DigestAF: info.Digest, + DigestBlob: reference.Digest.String(), + } + + artifactBlobs = append(artifactBlobs, artifactBlob) + } + + if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil { + if strings.Contains(err.Error(), dao.ErrDupRows.Error()) { + log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag") + return nil + } + + return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err) + } + + return nil +} diff --git a/src/core/middlewares/inlet.go b/src/core/middlewares/inlet.go new file mode 100644 index 000000000..3e0f30eef --- /dev/null +++ b/src/core/middlewares/inlet.go @@ -0,0 +1,57 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package middlewares + +import ( + "errors" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/filter" + "github.com/goharbor/harbor/src/core/middlewares/registryproxy" + "github.com/goharbor/harbor/src/core/middlewares/util" + "net/http" +) + +var head http.Handler +var proxy http.Handler + +// Init initialize the Proxy instance and handler chain. +func Init() error { + proxy = registryproxy.New() + if proxy == nil { + return errors.New("get nil when to create proxy") + } + return nil +} + +// Handle handles the request. +func Handle(rw http.ResponseWriter, req *http.Request) { + securityCtx, err := filter.GetSecurityContext(req) + if err != nil { + log.Errorf("failed to get security context in middlerware: %v", err) + // error to get security context, use the default chain. + head = New(Middlewares).Create().Then(proxy) + } else { + // true: the request is from 127.0.0.1, only quota middlewares are applied to request + // false: the request is from outside, all of middlewares are applied to the request. + if securityCtx.IsSolutionUser() { + head = New(MiddlewaresLocal).Create().Then(proxy) + } else { + head = New(Middlewares).Create().Then(proxy) + } + } + + customResW := util.NewCustomResponseWriter(rw) + head.ServeHTTP(customResW, req) +} diff --git a/src/core/middlewares/interceptor/interceptor.go b/src/core/middlewares/interceptor/interceptor.go new file mode 100644 index 000000000..ab8cf6ec6 --- /dev/null +++ b/src/core/middlewares/interceptor/interceptor.go @@ -0,0 +1,48 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interceptor + +import ( + "net/http" +) + +// Builder interceptor builder +type Builder interface { + // Build build interceptor from http.Request + // (nil, nil) must be returned if builder not match the request + Build(*http.Request) (Interceptor, error) +} + +// Interceptor interceptor for middleware +type Interceptor interface { + // HandleRequest ... + HandleRequest(*http.Request) error + + // HandleResponse won't return any error + HandleResponse(http.ResponseWriter, *http.Request) +} + +// ResponseInterceptorFunc ... +type ResponseInterceptorFunc func(w http.ResponseWriter, r *http.Request) + +// HandleRequest no-op HandleRequest +func (f ResponseInterceptorFunc) HandleRequest(*http.Request) error { + return nil +} + +// HandleResponse calls f(w, r). +func (f ResponseInterceptorFunc) HandleResponse(w http.ResponseWriter, r *http.Request) { + f(w, r) +} diff --git a/src/core/middlewares/interceptor/quota/options.go b/src/core/middlewares/interceptor/quota/options.go new file mode 100644 index 000000000..ddf102a74 --- /dev/null +++ b/src/core/middlewares/interceptor/quota/options.go @@ -0,0 +1,150 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quota + +import ( + "net/http" + + "github.com/goharbor/harbor/src/common/quota" + "github.com/goharbor/harbor/src/pkg/types" +) + +// Option ... +type Option func(*Options) + +// Action ... +type Action string + +const ( + // AddAction action to add resources + AddAction Action = "add" + // SubtractAction action to subtract resources + SubtractAction Action = "subtract" +) + +// Options ... +type Options struct { + enforceResources *bool + + Action Action + Manager *quota.Manager + MutexKeys []string + Resources types.ResourceList + StatusCode int + + OnResources func(*http.Request) (types.ResourceList, error) + OnFulfilled func(http.ResponseWriter, *http.Request) error + OnRejected func(http.ResponseWriter, *http.Request) error + OnFinally func(http.ResponseWriter, *http.Request) error +} + +// EnforceResources ... 
+func (opts *Options) EnforceResources() bool { + return opts.enforceResources != nil && *opts.enforceResources +} + +func boolPtr(v bool) *bool { + return &v +} + +func newOptions(opt ...Option) Options { + opts := Options{} + + for _, o := range opt { + o(&opts) + } + + if opts.Action == "" { + opts.Action = AddAction + } + + if opts.StatusCode == 0 { + opts.StatusCode = http.StatusOK + } + + if opts.enforceResources == nil { + opts.enforceResources = boolPtr(true) + } + + return opts +} + +// EnforceResources sets the interceptor enforceResources +func EnforceResources(enforceResources bool) Option { + return func(o *Options) { + o.enforceResources = boolPtr(enforceResources) + } +} + +// WithAction sets the interceptor action +func WithAction(a Action) Option { + return func(o *Options) { + o.Action = a + } +} + +// Manager sets the interceptor manager +func Manager(m *quota.Manager) Option { + return func(o *Options) { + o.Manager = m + } +} + +// WithManager sets the interceptor manager by reference and referenceID +func WithManager(reference, referenceID string) Option { + return func(o *Options) { + m, err := quota.NewManager(reference, referenceID) + if err != nil { + return + } + + o.Manager = m + } +} + +// MutexKeys set the interceptor mutex keys +func MutexKeys(keys ...string) Option { + return func(o *Options) { + o.MutexKeys = keys + } +} + +// Resources set the interceptor resources +func Resources(r types.ResourceList) Option { + return func(o *Options) { + o.Resources = r + } +} + +// StatusCode set the interceptor status code +func StatusCode(c int) Option { + return func(o *Options) { + o.StatusCode = c + } +} + +// OnResources sets the interceptor on resources function +func OnResources(f func(*http.Request) (types.ResourceList, error)) Option { + return func(o *Options) { + o.OnResources = f + } +} + +// OnFulfilled set the success handler for interceptor +func OnFulfilled(f func(http.ResponseWriter, *http.Request) error) Option { + return 
func(o *Options) { + o.OnFulfilled = f + } +} diff --git a/src/core/middlewares/interceptor/quota/quota.go b/src/core/middlewares/interceptor/quota/quota.go new file mode 100644 index 000000000..607f58dde --- /dev/null +++ b/src/core/middlewares/interceptor/quota/quota.go @@ -0,0 +1,189 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quota + +import ( + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/common/utils/redis" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/pkg/types" +) + +// New .... +func New(opts ...Option) interceptor.Interceptor { + options := newOptions(opts...) + + return "aInterceptor{opts: &options} +} + +type statusRecorder interface { + Status() int +} + +type quotaInterceptor struct { + opts *Options + resources types.ResourceList + mutexes []*redis.Mutex +} + +// HandleRequest ... +func (qi *quotaInterceptor) HandleRequest(req *http.Request) (err error) { + defer func() { + if err != nil { + qi.freeMutexes() + } + }() + + err = qi.requireMutexes() + if err != nil { + return + } + + err = qi.computeResources(req) + if err != nil { + return + } + + err = qi.reserve() + if err != nil { + log.Errorf("Failed to %s resources, error: %v", qi.opts.Action, err) + } + + return +} + +// HandleResponse ... 
+func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Request) { + defer qi.freeMutexes() + + sr, ok := w.(statusRecorder) + if !ok { + return + } + + opts := qi.opts + + switch sr.Status() { + case opts.StatusCode: + if opts.OnFulfilled != nil { + if err := opts.OnFulfilled(w, req); err != nil { + log.Errorf("Failed to handle on fulfilled, error: %v", err) + } + } + default: + if err := qi.unreserve(); err != nil { + log.Errorf("Failed to %s resources, error: %v", opts.Action, err) + } + + if opts.OnRejected != nil { + if err := opts.OnRejected(w, req); err != nil { + log.Errorf("Failed to handle on rejected, error: %v", err) + } + } + } + + if opts.OnFinally != nil { + if err := opts.OnFinally(w, req); err != nil { + log.Errorf("Failed to handle on finally, error: %v", err) + } + } +} + +func (qi *quotaInterceptor) requireMutexes() error { + if !qi.opts.EnforceResources() { + // Do nothing for locks when quota interceptor not enforce resources + return nil + } + + for _, key := range qi.opts.MutexKeys { + m, err := redis.RequireLock(key) + if err != nil { + return err + } + qi.mutexes = append(qi.mutexes, m) + } + + return nil +} + +func (qi *quotaInterceptor) freeMutexes() { + for i := len(qi.mutexes) - 1; i >= 0; i-- { + if err := redis.FreeLock(qi.mutexes[i]); err != nil { + log.Error(err) + } + } +} + +func (qi *quotaInterceptor) computeResources(req *http.Request) error { + if !qi.opts.EnforceResources() { + // Do nothing in compute resources when quota interceptor not enforce resources + return nil + } + + qi.resources = qi.opts.Resources + if len(qi.resources) == 0 && qi.opts.OnResources != nil { + resources, err := qi.opts.OnResources(req) + if err != nil { + return fmt.Errorf("failed to compute the resources for quota, error: %v", err) + } + + qi.resources = resources + } + + return nil +} + +func (qi *quotaInterceptor) reserve() error { + if !qi.opts.EnforceResources() { + // Do nothing in reserve resources when quota interceptor 
not enforce resources + return nil + } + + if len(qi.resources) == 0 { + return nil + } + + switch qi.opts.Action { + case AddAction: + return qi.opts.Manager.AddResources(qi.resources) + case SubtractAction: + return qi.opts.Manager.SubtractResources(qi.resources) + } + + return nil +} + +func (qi *quotaInterceptor) unreserve() error { + if !qi.opts.EnforceResources() { + // Do nothing in unreserve resources when quota interceptor not enforce resources + return nil + } + + if len(qi.resources) == 0 { + return nil + } + + switch qi.opts.Action { + case AddAction: + return qi.opts.Manager.SubtractResources(qi.resources) + case SubtractAction: + return qi.opts.Manager.AddResources(qi.resources) + } + + return nil +} diff --git a/src/core/middlewares/interface.go b/src/core/middlewares/interface.go new file mode 100644 index 000000000..4ca772f43 --- /dev/null +++ b/src/core/middlewares/interface.go @@ -0,0 +1,22 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middlewares + +import "github.com/justinas/alice" + +// ChainCreator ... 
+type ChainCreator interface { + Create(middlewares []string) *alice.Chain +} diff --git a/src/core/middlewares/listrepo/handler.go b/src/core/middlewares/listrepo/handler.go new file mode 100644 index 000000000..9cc2a2ae0 --- /dev/null +++ b/src/core/middlewares/listrepo/handler.go @@ -0,0 +1,104 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package listrepo + +import ( + "encoding/json" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/util" + "net/http" + "net/http/httptest" + "regexp" + "strconv" +) + +const ( + catalogURLPattern = `/v2/_catalog` +) + +type listReposHandler struct { + next http.Handler +} + +// New ... +func New(next http.Handler) http.Handler { + return &listReposHandler{ + next: next, + } +} + +// ServeHTTP ... 
+func (lrh listReposHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + var rec *httptest.ResponseRecorder + listReposFlag := matchListRepos(req) + if listReposFlag { + rec = httptest.NewRecorder() + lrh.next.ServeHTTP(rec, req) + if rec.Result().StatusCode != http.StatusOK { + util.CopyResp(rec, rw) + return + } + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(rec.Body) + if err := decoder.Decode(&ctlg); err != nil { + log.Errorf("Decode repositories error: %v", err) + util.CopyResp(rec, rw) + return + } + var entries []string + for repo := range ctlg.Repositories { + log.Debugf("the repo in the response %s", ctlg.Repositories[repo]) + exist := dao.RepositoryExists(ctlg.Repositories[repo]) + if exist { + entries = append(entries, ctlg.Repositories[repo]) + } + } + type Repos struct { + Repositories []string `json:"repositories"` + } + resp := &Repos{Repositories: entries} + respJSON, err := json.Marshal(resp) + if err != nil { + log.Errorf("Encode repositories error: %v", err) + util.CopyResp(rec, rw) + return + } + + for k, v := range rec.Header() { + rw.Header()[k] = v + } + clen := len(respJSON) + rw.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(clen)) + rw.Write(respJSON) + return + } + lrh.next.ServeHTTP(rw, req) +} + +// matchListRepos checks if the request looks like a request to list repositories. 
+func matchListRepos(req *http.Request) bool { + if req.Method != http.MethodGet { + return false + } + re := regexp.MustCompile(catalogURLPattern) + s := re.FindStringSubmatch(req.URL.Path) + if len(s) == 1 { + return true + } + return false +} diff --git a/src/core/middlewares/listrepo/handler_test.go b/src/core/middlewares/listrepo/handler_test.go new file mode 100644 index 000000000..70bbbeaf9 --- /dev/null +++ b/src/core/middlewares/listrepo/handler_test.go @@ -0,0 +1,37 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package listrepo + +import ( + "github.com/stretchr/testify/assert" + "net/http" + "testing" +) + +func TestMatchListRepos(t *testing.T) { + assert := assert.New(t) + req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/_catalog", nil) + res1 := matchListRepos(req1) + assert.False(res1, "%s %v is not a request to list repos", req1.Method, req1.URL) + + req2, _ := http.NewRequest("GET", "http://127.0.0.1:5000/v2/_catalog", nil) + res2 := matchListRepos(req2) + assert.True(res2, "%s %v is a request to list repos", req2.Method, req2.URL) + + req3, _ := http.NewRequest("GET", "https://192.168.0.5:443/v1/_catalog", nil) + res3 := matchListRepos(req3) + assert.False(res3, "%s %v is not a request to pull manifest", req3.Method, req3.URL) + +} diff --git a/src/core/middlewares/multiplmanifest/handler.go b/src/core/middlewares/multiplmanifest/handler.go new file mode 100644 index 000000000..d0126696c --- /dev/null +++ b/src/core/middlewares/multiplmanifest/handler.go @@ -0,0 +1,48 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package multiplmanifest + +import ( + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/util" + "net/http" + "strings" +) + +type multipleManifestHandler struct { + next http.Handler +} + +// New ... 
+func New(next http.Handler) http.Handler { + return &multipleManifestHandler{ + next: next, + } +} + +// ServeHTTP The handler is responsible for blocking request to upload manifest list by docker client, which is not supported so far by Harbor. +func (mh multipleManifestHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + match, _, _ := util.MatchPushManifest(req) + if match { + contentType := req.Header.Get("Content-type") + // application/vnd.docker.distribution.manifest.list.v2+json + if strings.Contains(contentType, "manifest.list.v2") { + log.Debugf("Content-type: %s is not supported, failing the response.", contentType) + http.Error(rw, util.MarshalError("UNSUPPORTED_MEDIA_TYPE", "Manifest.list is not supported."), http.StatusUnsupportedMediaType) + return + } + } + mh.next.ServeHTTP(rw, req) +} diff --git a/src/core/middlewares/readonly/hanlder.go b/src/core/middlewares/readonly/hanlder.go new file mode 100644 index 000000000..be77ac285 --- /dev/null +++ b/src/core/middlewares/readonly/hanlder.go @@ -0,0 +1,45 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package readonly + +import ( + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/util" + "net/http" +) + +type readonlyHandler struct { + next http.Handler +} + +// New ... 
+func New(next http.Handler) http.Handler { + return &readonlyHandler{ + next: next, + } +} + +// ServeHTTP ... +func (rh readonlyHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if config.ReadOnly() { + if req.Method == http.MethodDelete || req.Method == http.MethodPost || req.Method == http.MethodPatch || req.Method == http.MethodPut { + log.Warningf("The request is prohibited in readonly mode, url is: %s", req.URL.Path) + http.Error(rw, util.MarshalError("DENIED", "The system is in read only mode. Any modification is prohibited."), http.StatusForbidden) + return + } + } + rh.next.ServeHTTP(rw, req) +} diff --git a/src/core/middlewares/registryproxy/handler.go b/src/core/middlewares/registryproxy/handler.go new file mode 100644 index 000000000..72a9f02f0 --- /dev/null +++ b/src/core/middlewares/registryproxy/handler.go @@ -0,0 +1,61 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package registryproxy + +import ( + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "net/http" + "net/http/httputil" + "net/url" +) + +type proxyHandler struct { + handler http.Handler +} + +// New ... 
+func New(urls ...string) http.Handler { + var registryURL string + var err error + if len(urls) > 1 { + log.Errorf("the parm, urls should have only 0 or 1 elements") + return nil + } + if len(urls) == 0 { + registryURL, err = config.RegistryURL() + if err != nil { + log.Error(err) + return nil + } + } else { + registryURL = urls[0] + } + targetURL, err := url.Parse(registryURL) + if err != nil { + log.Error(err) + return nil + } + + return &proxyHandler{ + handler: httputil.NewSingleHostReverseProxy(targetURL), + } + +} + +// ServeHTTP ... +func (ph proxyHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + ph.handler.ServeHTTP(rw, req) +} diff --git a/src/core/middlewares/sizequota/builder.go b/src/core/middlewares/sizequota/builder.go new file mode 100644 index 000000000..a6e1ecf92 --- /dev/null +++ b/src/core/middlewares/sizequota/builder.go @@ -0,0 +1,212 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sizequota + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota" + "github.com/goharbor/harbor/src/core/middlewares/util" +) + +var ( + defaultBuilders = []interceptor.Builder{ + &blobStreamUploadBuilder{}, + &blobStorageQuotaBuilder{}, + &manifestCreationBuilder{}, + &manifestDeletionBuilder{}, + } +) + +// blobStreamUploadBuilder interceptor for PATCH /v2//blobs/uploads/ +type blobStreamUploadBuilder struct{} + +func (*blobStreamUploadBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if !match(req, http.MethodPatch, blobUploadURLRe) { + return nil, nil + } + + s := blobUploadURLRe.FindStringSubmatch(req.URL.Path) + uuid := s[2] + + onResponse := func(w http.ResponseWriter, req *http.Request) { + size, err := parseUploadedBlobSize(w) + if err != nil { + log.Errorf("failed to parse uploaded blob size for upload %s", uuid) + return + } + + ok, err := setUploadedBlobSize(uuid, size) + if err != nil { + log.Errorf("failed to update blob update size for upload %s, error: %v", uuid, err) + return + } + + if !ok { + // ToDo discuss what to do here. 
+ log.Errorf("fail to set bunk: %s size: %d in redis, it causes unable to set correct quota for the artifact", uuid, size) + } + } + + return interceptor.ResponseInterceptorFunc(onResponse), nil +} + +// blobStorageQuotaBuilder interceptor builder for these requests +// PUT /v2//blobs/uploads/?digest= +// POST /v2//blobs/uploads/?mount=&from= +type blobStorageQuotaBuilder struct{} + +func (*blobStorageQuotaBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + parseBlobInfo := getBlobInfoParser(req) + if parseBlobInfo == nil { + return nil, nil + } + + info, err := parseBlobInfo(req) + if err != nil { + return nil, err + } + + // replace req with blob info context + *req = *(req.WithContext(util.NewBlobInfoContext(req.Context(), info))) + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + quota.WithAction(quota.AddAction), + quota.StatusCode(http.StatusCreated), // NOTICE: mount blob and blob upload complete both return 201 when success + quota.OnResources(computeResourcesForBlob), + quota.MutexKeys(info.MutexKey()), + quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { + return syncBlobInfoToProject(info) + }), + } + + return quota.New(opts...), nil +} + +// manifestCreationBuilder interceptor builder for the request PUT /v2//manifests/ +type manifestCreationBuilder struct{} + +func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchPushManifest(req); !match { + return nil, nil + } + + info, err := util.ParseManifestInfo(req) + if err != nil { + return nil, err + } + + // Replace request with manifests info context + *req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info)) + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + 
quota.WithAction(quota.AddAction), + quota.StatusCode(http.StatusCreated), + quota.OnResources(computeResourcesForManifestCreation), + quota.MutexKeys(info.MutexKey("size")), + quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { + // manifest created, sync manifest itself as blob to blob and project_blob table + blobInfo, err := parseBlobInfoFromManifest(req) + if err != nil { + return err + } + + if err := syncBlobInfoToProject(blobInfo); err != nil { + return err + } + + // sync blobs from manifest which are not in project to project_blob table + blobs, err := info.GetBlobsNotInProject() + if err != nil { + return err + } + + _, err = dao.AddBlobsToProject(info.ProjectID, blobs...) + + return err + }), + } + + return quota.New(opts...), nil +} + +// deleteManifestBuilder interceptor builder for the request DELETE /v2//manifests/ +type manifestDeletionBuilder struct{} + +func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) { + if match, _, _ := util.MatchDeleteManifest(req); !match { + return nil, nil + } + + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + var err error + info, err = util.ParseManifestInfoFromPath(req) + if err != nil { + return nil, fmt.Errorf("failed to parse manifest, error %v", err) + } + + // Manifest info will be used by computeResourcesForDeleteManifest + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) + } + + blobs, err := dao.GetBlobsByArtifact(info.Digest) + if err != nil { + return nil, fmt.Errorf("failed to query blobs of %s, error: %v", info.Digest, err) + } + + mutexKeys := []string{info.MutexKey("size")} + for _, blob := range blobs { + mutexKeys = append(mutexKeys, info.BlobMutexKey(blob)) + } + + opts := []quota.Option{ + quota.EnforceResources(config.QuotaPerProjectEnable()), + quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)), + quota.WithAction(quota.SubtractAction), + quota.StatusCode(http.StatusAccepted), + 
quota.OnResources(computeResourcesForManifestDeletion), + quota.MutexKeys(mutexKeys...), + quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error { + blobs := info.ExclusiveBlobs + + total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{ + PID: info.ProjectID, + Digest: info.Digest, + }) + if err == nil && total > 0 { + blob, err := dao.GetBlob(info.Digest) + if err == nil { + blobs = append(blobs, blob) + } + } + + return dao.RemoveBlobsFromProject(info.ProjectID, blobs...) + }), + } + + return quota.New(opts...), nil +} diff --git a/src/core/middlewares/sizequota/handler.go b/src/core/middlewares/sizequota/handler.go new file mode 100644 index 000000000..244e55589 --- /dev/null +++ b/src/core/middlewares/sizequota/handler.go @@ -0,0 +1,83 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sizequota + +import ( + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/interceptor" + "github.com/goharbor/harbor/src/core/middlewares/util" +) + +type sizeQuotaHandler struct { + builders []interceptor.Builder + next http.Handler +} + +// New ... +func New(next http.Handler, builders ...interceptor.Builder) http.Handler { + if len(builders) == 0 { + builders = defaultBuilders + } + + return &sizeQuotaHandler{ + builders: builders, + next: next, + } +} + +// ServeHTTP ... 
+func (h *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + interceptor, err := h.getInterceptor(req) + if err != nil { + log.Warningf("Error occurred when to handle request in size quota handler: %v", err) + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)), + http.StatusInternalServerError) + return + } + + if interceptor == nil { + h.next.ServeHTTP(rw, req) + return + } + + if err := interceptor.HandleRequest(req); err != nil { + log.Warningf("Error occurred when to handle request in size quota handler: %v", err) + http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)), + http.StatusInternalServerError) + return + } + + h.next.ServeHTTP(rw, req) + + interceptor.HandleResponse(rw, req) +} + +func (h *sizeQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) { + for _, builder := range h.builders { + interceptor, err := builder.Build(req) + if err != nil { + return nil, err + } + + if interceptor != nil { + return interceptor, nil + } + } + + return nil, nil +} diff --git a/src/core/middlewares/sizequota/handler_test.go b/src/core/middlewares/sizequota/handler_test.go new file mode 100644 index 000000000..e2b2bb309 --- /dev/null +++ b/src/core/middlewares/sizequota/handler_test.go @@ -0,0 +1,710 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package sizequota + +import ( + "bytes" + "encoding/json" + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "strconv" + "sync" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/countquota" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/suite" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func genUUID() string { + b := make([]byte, 16) + + if _, err := rand.Read(b); err != nil { + return "" + } + + return fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +func getProjectCountUsage(projectID int64) (int64, error) { + usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)} + err := dao.GetOrmer().Read(&usage, "reference", "reference_id") + if err != nil { + return 0, err + } + used, err := types.NewResourceList(usage.Used) + if err != nil { + return 0, err + } + + return used[types.ResourceCount], nil +} + +func getProjectStorageUsage(projectID int64) (int64, error) { + usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)} + err := dao.GetOrmer().Read(&usage, "reference", "reference_id") + if err != nil { + return 0, err + } + used, err := types.NewResourceList(usage.Used) + if err != nil { + return 0, err + } + + return used[types.ResourceStorage], nil +} + +func randomString(n int) string { + const letterBytes = "abcdefghijklmnopqrstuvwxyz" + + b := make([]byte, n) + for i 
:= range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + + return string(b) +} + +func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest { + manifest := schema2.Manifest{ + Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest}, + Config: distribution.Descriptor{ + MediaType: schema2.MediaTypeImageConfig, + Size: configSize, + Digest: digest.FromString(randomString(15)), + }, + } + + for _, size := range layerSizes { + manifest.Layers = append(manifest.Layers, distribution.Descriptor{ + MediaType: schema2.MediaTypeLayer, + Size: size, + Digest: digest.FromString(randomString(15)), + }) + } + + return manifest +} + +func manifestWithAdditionalLayers(raw schema2.Manifest, layerSizes []int64) schema2.Manifest { + var manifest schema2.Manifest + + manifest.Versioned = raw.Versioned + manifest.Config = raw.Config + manifest.Layers = append(manifest.Layers, raw.Layers...) + + for _, size := range layerSizes { + manifest.Layers = append(manifest.Layers, distribution.Descriptor{ + MediaType: schema2.MediaTypeLayer, + Size: size, + Digest: digest.FromString(randomString(15)), + }) + } + + return manifest +} + +func digestOfManifest(manifest schema2.Manifest) string { + bytes, _ := json.Marshal(manifest) + + return digest.FromBytes(bytes).String() +} + +func sizeOfManifest(manifest schema2.Manifest) int64 { + bytes, _ := json.Marshal(manifest) + + return int64(len(bytes)) +} + +func sizeOfImage(manifest schema2.Manifest) int64 { + totalSizeOfLayers := manifest.Config.Size + + for _, layer := range manifest.Layers { + totalSizeOfLayers += layer.Size + } + + return sizeOfManifest(manifest) + totalSizeOfLayers +} + +func doHandle(req *http.Request, next ...http.HandlerFunc) int { + rr := httptest.NewRecorder() + + var n http.HandlerFunc + if len(next) > 0 { + n = next[0] + } else { + n = func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + } + } + + h := New(http.HandlerFunc(n)) + 
h.ServeHTTP(util.NewCustomResponseWriter(rr), req) + + return rr.Code +} + +func patchBlobUpload(projectName, name, uuid, blobDigest string, chunkSize int64) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest) + req, _ := http.NewRequest(http.MethodPatch, url, nil) + + doHandle(req, func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Range", fmt.Sprintf("0-%d", chunkSize-1)) + }) +} + +func putBlobUpload(projectName, name, uuid, blobDigest string, blobSize ...int64) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest) + req, _ := http.NewRequest(http.MethodPut, url, nil) + if len(blobSize) > 0 { + req.Header.Add("Content-Length", strconv.FormatInt(blobSize[0], 10)) + } + + doHandle(req, func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + }) +} + +func mountBlob(projectName, name, blobDigest, fromRepository string) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/blobs/uploads/?mount=%s&from=%s", repository, blobDigest, fromRepository) + req, _ := http.NewRequest(http.MethodPost, url, nil) + + doHandle(req, func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + }) +} + +func deleteManifest(projectName, name, digest string, accepted ...func() bool) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + url := fmt.Sprintf("/v2/%s/manifests/%s", repository, digest) + req, _ := http.NewRequest(http.MethodDelete, url, nil) + + next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if len(accepted) > 0 { + if accepted[0]() { + w.WriteHeader(http.StatusAccepted) + } else { + w.WriteHeader(http.StatusNotFound) + } + + return + } + + w.WriteHeader(http.StatusAccepted) + })) + + rr := 
httptest.NewRecorder() + h := New(next) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req) +} + +func putManifest(projectName, name, tag string, manifest schema2.Manifest) { + repository := fmt.Sprintf("%s/%s", projectName, name) + + buf, _ := json.Marshal(manifest) + + url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag) + req, _ := http.NewRequest(http.MethodPut, url, bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusCreated) + })) + + rr := httptest.NewRecorder() + h := New(next) + h.ServeHTTP(util.NewCustomResponseWriter(rr), req) +} + +func pushImage(projectName, name, tag string, manifest schema2.Manifest) { + putBlobUpload(projectName, name, genUUID(), manifest.Config.Digest.String(), manifest.Config.Size) + for _, layer := range manifest.Layers { + putBlobUpload(projectName, name, genUUID(), layer.Digest.String(), layer.Size) + } + + putManifest(projectName, name, tag, manifest) +} + +func withProject(f func(int64, string)) { + projectName := randomString(5) + + projectID, err := dao.AddProject(models.Project{ + Name: projectName, + OwnerID: 1, + }) + if err != nil { + panic(err) + } + + defer func() { + dao.DeleteProject(projectID) + }() + + f(projectID, projectName) +} + +type HandlerSuite struct { + suite.Suite +} + +func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) { + count, err := getProjectCountUsage(projectID) + suite.Nil(err, fmt.Sprintf("Failed to get count usage of project %d, error: %v", projectID, err)) + suite.Equal(expected, count, "Failed to check count usage for project %d", projectID) +} + +func (suite *HandlerSuite) checkStorageUsage(expected, projectID int64) { + value, err := getProjectStorageUsage(projectID) + suite.Nil(err, fmt.Sprintf("Failed to get storage usage of project %d, error: %v", projectID, err)) + suite.Equal(expected, value, "Failed to check storage 
usage for project %d", projectID) +} + +func (suite *HandlerSuite) TearDownTest() { + for _, table := range []string{ + "artifact", "blob", + "artifact_blob", "project_blob", + "quota", "quota_usage", + } { + dao.ClearTable(table) + } +} + +func (suite *HandlerSuite) TestPatchBlobUpload() { + withProject(func(projectID int64, projectName string) { + uuid := genUUID() + blobDigest := digest.FromString(randomString(15)).String() + patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024) + size, err := getUploadedBlobSize(uuid) + suite.Nil(err) + suite.Equal(int64(1024), size) + }) +} + +func (suite *HandlerSuite) TestPutBlobUpload() { + withProject(func(projectID int64, projectName string) { + uuid := genUUID() + blobDigest := digest.FromString(randomString(15)).String() + putBlobUpload(projectName, "photon", uuid, blobDigest, 1024) + suite.checkStorageUsage(1024, projectID) + + blob, err := dao.GetBlob(blobDigest) + suite.Nil(err) + suite.Equal(int64(1024), blob.Size) + }) +} + +func (suite *HandlerSuite) TestPutBlobUploadWithPatch() { + withProject(func(projectID int64, projectName string) { + uuid := genUUID() + blobDigest := digest.FromString(randomString(15)).String() + patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024) + + putBlobUpload(projectName, "photon", uuid, blobDigest) + suite.checkStorageUsage(1024, projectID) + + blob, err := dao.GetBlob(blobDigest) + suite.Nil(err) + suite.Equal(int64(1024), blob.Size) + }) +} + +func (suite *HandlerSuite) TestMountBlob() { + withProject(func(projectID int64, projectName string) { + blobDigest := digest.FromString(randomString(15)).String() + putBlobUpload(projectName, "photon", genUUID(), blobDigest, 1024) + suite.checkStorageUsage(1024, projectID) + + repository := fmt.Sprintf("%s/%s", projectName, "photon") + + withProject(func(projectID int64, projectName string) { + mountBlob(projectName, "harbor", blobDigest, repository) + suite.checkStorageUsage(1024, projectID) + }) + }) +} + +func (suite 
*HandlerSuite) TestPutManifestCreated() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(100, []int64{100, 100}) + + putBlobUpload(projectName, "photon", genUUID(), manifest.Config.Digest.String(), manifest.Config.Size) + for _, layer := range manifest.Layers { + putBlobUpload(projectName, "photon", genUUID(), layer.Digest.String(), layer.Size) + } + + putManifest(projectName, "photon", "latest", manifest) + + suite.checkStorageUsage(int64(300+sizeOfManifest(manifest)), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifest() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + deleteManifest(projectName, "photon", digestOfManifest(manifest)) + suite.checkStorageUsage(0, projectID) + }) +} + +func (suite *HandlerSuite) TestImageOverwrite() { + withProject(func(projectID int64, projectName string) { + manifest1 := makeManifest(1, []int64{2, 3, 4, 5}) + size1 := sizeOfImage(manifest1) + pushImage(projectName, "photon", "latest", manifest1) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1, projectID) + + manifest2 := makeManifest(1, []int64{2, 3, 4, 5}) + size2 := sizeOfImage(manifest2) + pushImage(projectName, "photon", "latest", manifest2) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1+size2, projectID) + + manifest3 := makeManifest(1, []int64{2, 3, 4, 5}) + size3 := sizeOfImage(manifest2) + pushImage(projectName, "photon", "latest", manifest3) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1+size2+size3, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageMultiTimes() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", 
"latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageToSameRepository() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "photon", "dev", manifest) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageToDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "redis", "latest", manifest) + suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID) + + pushImage(projectName, "postgres", "latest", manifest) + suite.checkStorageUsage(size+2*sizeOfManifest(manifest), projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageToDifferentProjects() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + withProject(func(id int64, name string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(name, "mysql", "latest", manifest) + suite.checkStorageUsage(size, id) + + suite.checkStorageUsage(size, 
projectID) + }) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestShareLayersInSameRepository() { + withProject(func(projectID int64, projectName string) { + manifest1 := makeManifest(1, []int64{2, 3, 4, 5}) + size1 := sizeOfImage(manifest1) + + pushImage(projectName, "mysql", "latest", manifest1) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1, projectID) + + manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7}) + pushImage(projectName, "mysql", "dev", manifest2) + suite.checkCountUsage(2, projectID) + + totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7 + suite.checkStorageUsage(totalSize, projectID) + + deleteManifest(projectName, "mysql", digestOfManifest(manifest1)) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestShareLayersInDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + manifest1 := makeManifest(1, []int64{2, 3, 4, 5}) + size1 := sizeOfImage(manifest1) + + pushImage(projectName, "mysql", "latest", manifest1) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size1, projectID) + + pushImage(projectName, "mysql", "dev", manifest1) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size1, projectID) + + manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7}) + pushImage(projectName, "mariadb", "latest", manifest2) + suite.checkCountUsage(3, projectID) + + totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7 + suite.checkStorageUsage(totalSize, projectID) + + deleteManifest(projectName, "mysql", digestOfManifest(manifest1)) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestInSameRepository() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + 
size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "photon", "dev", manifest) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + + deleteManifest(projectName, "photon", digestOfManifest(manifest)) + suite.checkCountUsage(0, projectID) + suite.checkStorageUsage(0, projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestInDifferentRepositories() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "mysql", "5.6", manifest) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "redis", "latest", manifest) + suite.checkCountUsage(3, projectID) + suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID) + + deleteManifest(projectName, "redis", digestOfManifest(manifest)) + suite.checkCountUsage(2, projectID) + suite.checkStorageUsage(size, projectID) + + pushImage(projectName, "redis", "latest", manifest) + suite.checkCountUsage(3, projectID) + suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteManifestInDifferentProjects() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "mysql", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + withProject(func(id int64, name string) { + pushImage(name, "mysql", "latest", manifest) + suite.checkStorageUsage(size, id) + + suite.checkStorageUsage(size, projectID) + deleteManifest(projectName, "mysql", digestOfManifest(manifest)) + 
suite.checkCountUsage(0, projectID) + suite.checkStorageUsage(0, projectID) + }) + + }) +} + +func (suite *HandlerSuite) TestPushDeletePush() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkStorageUsage(size, projectID) + + deleteManifest(projectName, "photon", digestOfManifest(manifest)) + suite.checkStorageUsage(0, projectID) + + pushImage(projectName, "photon", "latest", manifest) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestPushImageRace() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + size := sizeOfImage(manifest) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + pushImage(projectName, "photon", "latest", manifest) + }() + } + wg.Wait() + + suite.checkCountUsage(1, projectID) + suite.checkStorageUsage(size, projectID) + }) +} + +func (suite *HandlerSuite) TestDeleteImageRace() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "photon", "latest", manifest) + + count := 100 + size := sizeOfImage(manifest) + for i := 0; i < count; i++ { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "mysql", fmt.Sprintf("tag%d", i), manifest) + size += sizeOfImage(manifest) + } + + suite.checkCountUsage(int64(count+1), projectID) + suite.checkStorageUsage(size, projectID) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + deleteManifest(projectName, "photon", digestOfManifest(manifest), func() bool { + return i == 0 + }) + }(i) + } + wg.Wait() + + suite.checkCountUsage(int64(count), projectID) + suite.checkStorageUsage(size-sizeOfImage(manifest), projectID) + }) +} + +func (suite *HandlerSuite) 
TestDisableProjectQuota() { + withProject(func(projectID int64, projectName string) { + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "photon", "latest", manifest) + + quotas, err := dao.ListQuotas(&models.QuotaQuery{ + Reference: "project", + ReferenceID: strconv.FormatInt(projectID, 10), + }) + + suite.Nil(err) + suite.Len(quotas, 1) + }) + + withProject(func(projectID int64, projectName string) { + cfg := config.GetCfgManager() + cfg.Set(common.QuotaPerProjectEnable, false) + defer cfg.Set(common.QuotaPerProjectEnable, true) + + manifest := makeManifest(1, []int64{2, 3, 4, 5}) + pushImage(projectName, "photon", "latest", manifest) + + quotas, err := dao.ListQuotas(&models.QuotaQuery{ + Reference: "project", + ReferenceID: strconv.FormatInt(projectID, 10), + }) + + suite.Nil(err) + suite.Len(quotas, 0) + }) +} + +func TestMain(m *testing.M) { + config.Init() + dao.PrepareTestForPostgresSQL() + + if result := m.Run(); result != 0 { + os.Exit(result) + } +} + +func TestRunHandlerSuite(t *testing.T) { + suite.Run(t, new(HandlerSuite)) +} diff --git a/src/core/middlewares/sizequota/util.go b/src/core/middlewares/sizequota/util.go new file mode 100644 index 000000000..edcf92631 --- /dev/null +++ b/src/core/middlewares/sizequota/util.go @@ -0,0 +1,330 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sizequota + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/garyburd/redigo/redis" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/types" + "github.com/opencontainers/go-digest" +) + +var ( + blobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/([a-zA-Z0-9-_.=]+)/?$`) + initiateBlobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/?$`) +) + +// parseUploadedBlobSize parse the blob stream upload response and return the size blob uploaded +func parseUploadedBlobSize(w http.ResponseWriter) (int64, error) { + // Range: Range indicating the current progress of the upload. + // https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload + r := w.Header().Get("Range") + + end := strings.Split(r, "-")[1] + size, err := strconv.ParseInt(end, 10, 64) + if err != nil { + return 0, err + } + + // docker registry did '-1' in the response + if size > 0 { + size = size + 1 + } + + return size, nil +} + +// setUploadedBlobSize update the size of stream upload blob +func setUploadedBlobSize(uuid string, size int64) (bool, error) { + conn, err := util.GetRegRedisCon() + if err != nil { + return false, err + } + defer conn.Close() + + key := fmt.Sprintf("upload:%s:size", uuid) + reply, err := redis.String(conn.Do("SET", key, size)) + if err != nil { + return false, err + } + return reply == "OK", nil + +} + +// getUploadedBlobSize returns the size of stream upload blob +func getUploadedBlobSize(uuid string) (int64, error) { + conn, err := util.GetRegRedisCon() + if err != nil { + return 0, err + } + defer conn.Close() + + key := fmt.Sprintf("upload:%s:size", uuid) + size, err := redis.Int64(conn.Do("GET", key)) + if err != nil { 
+ return 0, err + } + + return size, nil +} + +// parseBlobSize returns blob size from blob upload complete request +func parseBlobSize(req *http.Request, uuid string) (int64, error) { + size, err := strconv.ParseInt(req.Header.Get("Content-Length"), 10, 64) + if err == nil && size != 0 { + return size, nil + } + + return getUploadedBlobSize(uuid) +} + +// match returns true if request method equal method and path match re +func match(req *http.Request, method string, re *regexp.Regexp) bool { + return req.Method == method && re.MatchString(req.URL.Path) +} + +// parseBlobInfoFromComplete returns blob info from blob upload complete request +func parseBlobInfoFromComplete(req *http.Request) (*util.BlobInfo, error) { + if !match(req, http.MethodPut, blobUploadURLRe) { + return nil, fmt.Errorf("not match url %s for blob upload complete", req.URL.Path) + } + + s := blobUploadURLRe.FindStringSubmatch(req.URL.Path) + repository, uuid := s[1][:len(s[1])-1], s[2] + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + dgt, err := digest.Parse(req.FormValue("digest")) + if err != nil { + return nil, fmt.Errorf("blob digest invalid for upload %s", uuid) + } + + size, err := parseBlobSize(req, uuid) + if err != nil { + return nil, fmt.Errorf("failed to get content length of blob upload %s, error: %v", uuid, err) + } + + return &util.BlobInfo{ + ProjectID: project.ProjectID, + Repository: repository, + Digest: dgt.String(), + Size: size, + }, nil +} + +// parseBlobInfoFromManifest returns blob info from put the manifest request +func parseBlobInfoFromManifest(req *http.Request) (*util.BlobInfo, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + manifest, err := util.ParseManifestInfo(req) + if err != nil 
{ + return nil, err + } + + info = manifest + + // replace the request with manifest info + *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info))) + } + + return &util.BlobInfo{ + ProjectID: info.ProjectID, + Repository: info.Repository, + Digest: info.Descriptor.Digest.String(), + Size: info.Descriptor.Size, + ContentType: info.Descriptor.MediaType, + }, nil +} + +// parseBlobInfoFromMount returns blob info from blob mount request +func parseBlobInfoFromMount(req *http.Request) (*util.BlobInfo, error) { + if !match(req, http.MethodPost, initiateBlobUploadURLRe) { + return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path) + } + + if req.FormValue("mount") == "" || req.FormValue("from") == "" { + return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path) + } + + dgt, err := digest.Parse(req.FormValue("mount")) + if err != nil { + return nil, errors.New("mount must be digest") + } + + s := initiateBlobUploadURLRe.FindStringSubmatch(req.URL.Path) + repository := strings.TrimSuffix(s[1], "/") + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + blob, err := dao.GetBlob(dgt.String()) + if err != nil { + return nil, fmt.Errorf("failed to get blob %s, error: %v", dgt.String(), err) + } + if blob == nil { + return nil, fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", dgt.String()) + } + + return &util.BlobInfo{ + ProjectID: project.ProjectID, + Repository: repository, + Digest: dgt.String(), + Size: blob.Size, + }, nil +} + +// getBlobInfoParser return parse blob info function for request +// returns parseBlobInfoFromComplete when request match PUT /v2//blobs/uploads/?digest= +// returns parseBlobInfoFromMount when request match POST 
/v2//blobs/uploads/?mount=&from= +func getBlobInfoParser(req *http.Request) func(*http.Request) (*util.BlobInfo, error) { + if match(req, http.MethodPut, blobUploadURLRe) { + if req.FormValue("digest") != "" { + return parseBlobInfoFromComplete + } + } + + if match(req, http.MethodPost, initiateBlobUploadURLRe) { + if req.FormValue("mount") != "" && req.FormValue("from") != "" { + return parseBlobInfoFromMount + } + } + + return nil +} + +// computeResourcesForBlob returns storage required for blob, no storage required if blob exists in project +func computeResourcesForBlob(req *http.Request) (types.ResourceList, error) { + info, ok := util.BlobInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("blob info missing") + } + + exist, err := info.BlobExists() + if err != nil { + return nil, err + } + + if exist { + return nil, nil + } + + return types.ResourceList{types.ResourceStorage: info.Size}, nil +} + +// computeResourcesForManifestCreation returns storage resource required for manifest +// no storage required if manifest exists in project +// the sum size of manifest itself and blobs not in project will return if manifest not exists in project +func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("manifest info missing") + } + + exist, err := info.ManifestExists() + if err != nil { + return nil, err + } + + // manifest exist in project, so no storage quota required + if exist { + return nil, nil + } + + blobs, err := info.GetBlobsNotInProject() + if err != nil { + return nil, err + } + + size := info.Descriptor.Size + + for _, blob := range blobs { + size += blob.Size + } + + return types.ResourceList{types.ResourceStorage: size}, nil +} + +// computeResourcesForManifestDeletion returns storage resource will be released when manifest deleted +// then result will be the sum of manifest itself and blobs which will not be 
used by other manifests of project +func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) { + info, ok := util.ManifestInfoFromContext(req.Context()) + if !ok { + return nil, errors.New("manifest info missing") + } + + blobs, err := dao.GetExclusiveBlobs(info.ProjectID, info.Repository, info.Digest) + if err != nil { + return nil, err + } + + info.ExclusiveBlobs = blobs + + blob, err := dao.GetBlob(info.Digest) + if err != nil { + return nil, err + } + + // manifest size will always be released + size := blob.Size + + for _, blob := range blobs { + size = size + blob.Size + } + + return types.ResourceList{types.ResourceStorage: size}, nil +} + +// syncBlobInfoToProject create the blob and add it to project +func syncBlobInfoToProject(info *util.BlobInfo) error { + _, blob, err := dao.GetOrCreateBlob(&models.Blob{ + Digest: info.Digest, + ContentType: info.ContentType, + Size: info.Size, + CreationTime: time.Now(), + }) + if err != nil { + return err + } + + if _, err := dao.AddBlobToProject(blob.ID, info.ProjectID); err != nil { + return err + } + + return nil +} diff --git a/src/core/middlewares/url/handler.go b/src/core/middlewares/url/handler.go new file mode 100644 index 000000000..07e1a0f3f --- /dev/null +++ b/src/core/middlewares/url/handler.go @@ -0,0 +1,74 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package url + +import ( + "context" + "fmt" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/middlewares/util" + coreutils "github.com/goharbor/harbor/src/core/utils" + "net/http" + "strings" +) + +type urlHandler struct { + next http.Handler +} + +// New ... +func New(next http.Handler) http.Handler { + return &urlHandler{ + next: next, + } +} + +// ServeHTTP ... +func (uh urlHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + log.Debugf("in url handler, path: %s", req.URL.Path) + flag, repository, reference := util.MatchPullManifest(req) + if flag { + components := strings.SplitN(repository, "/", 2) + if len(components) < 2 { + http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Bad repository name: %s", repository)), http.StatusBadRequest) + return + } + + client, err := coreutils.NewRepositoryClientForUI(util.TokenUsername, repository) + if err != nil { + log.Errorf("Error creating repository Client: %v", err) + http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError) + return + } + digest, _, err := client.ManifestExist(reference) + if err != nil { + log.Errorf("Failed to get digest for reference: %s, error: %v", reference, err) + http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError) + return + } + + img := util.ImageInfo{ + Repository: repository, + Reference: reference, + ProjectName: components[0], + Digest: digest, + } + + log.Debugf("image info of the request: %#v", img) + ctx := context.WithValue(req.Context(), util.ImageInfoCtxKey, img) + req = req.WithContext(ctx) + } + uh.next.ServeHTTP(rw, req) +} diff --git a/src/core/middlewares/util/response.go b/src/core/middlewares/util/response.go new file mode 100644 index 000000000..48e3f0cda --- /dev/null +++ 
b/src/core/middlewares/util/response.go @@ -0,0 +1,59 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "net/http" +) + +// CustomResponseWriter write the response code into the status +type CustomResponseWriter struct { + http.ResponseWriter + status int + wroteHeader bool +} + +// NewCustomResponseWriter ... +func NewCustomResponseWriter(w http.ResponseWriter) *CustomResponseWriter { + return &CustomResponseWriter{ResponseWriter: w} +} + +// Status ... +func (w *CustomResponseWriter) Status() int { + return w.status +} + +// Header ... +func (w CustomResponseWriter) Header() http.Header { + return w.ResponseWriter.Header() +} + +// Write ... +func (w *CustomResponseWriter) Write(p []byte) (n int, err error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + return w.ResponseWriter.Write(p) +} + +// WriteHeader ... 
+func (w *CustomResponseWriter) WriteHeader(code int) { + w.ResponseWriter.WriteHeader(code) + if w.wroteHeader { + return + } + w.status = code + w.wroteHeader = true +} diff --git a/src/replication/adapter/image_registry_test.go b/src/core/middlewares/util/response_test.go similarity index 61% rename from src/replication/adapter/image_registry_test.go rename to src/core/middlewares/util/response_test.go index 157471d41..40ec59c4e 100644 --- a/src/replication/adapter/image_registry_test.go +++ b/src/core/middlewares/util/response_test.go @@ -12,35 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -package adapter +package util import ( + "net/http/httptest" "testing" "github.com/stretchr/testify/assert" ) -// TODO add UT - -func TestIsDigest(t *testing.T) { - cases := []struct { - str string - isDigest bool - }{ - { - str: "", - isDigest: false, - }, - { - str: "latest", - isDigest: false, - }, - { - str: "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf", - isDigest: true, - }, - } - for _, c := range cases { - assert.Equal(t, c.isDigest, isDigest(c.str)) - } +func TestCustomResponseWriter(t *testing.T) { + rw := httptest.NewRecorder() + customResW := CustomResponseWriter{ResponseWriter: rw} + customResW.WriteHeader(501) + assert.Equal(t, customResW.Status(), 501) } diff --git a/src/core/middlewares/util/util.go b/src/core/middlewares/util/util.go new file mode 100644 index 000000000..7b8d2839e --- /dev/null +++ b/src/core/middlewares/util/util.go @@ -0,0 +1,528 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/garyburd/redigo/redis" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/common/utils/clair" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/promgr" + "github.com/goharbor/harbor/src/pkg/scan/whitelist" + "github.com/opencontainers/go-digest" +) + +type contextKey string + +const ( + // ImageInfoCtxKey the context key for image information + ImageInfoCtxKey = contextKey("ImageInfo") + // TokenUsername ... + // TODO: temp solution, remove after vmware/harbor#2242 is resolved. + TokenUsername = "harbor-core" + + // blobInfoKey the context key for blob info + blobInfoKey = contextKey("BlobInfo") + // chartVersionInfoKey the context key for chart version info + chartVersionInfoKey = contextKey("ChartVersionInfo") + // manifestInfoKey the context key for manifest info + manifestInfoKey = contextKey("ManifestInfo") + + // DialConnectionTimeout ... + DialConnectionTimeout = 30 * time.Second + // DialReadTimeout ... + DialReadTimeout = time.Minute + 10*time.Second + // DialWriteTimeout ... 
+ DialWriteTimeout = 10 * time.Second +) + +var ( + manifestURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`) +) + +// ChartVersionInfo ... +type ChartVersionInfo struct { + ProjectID int64 + Namespace string + ChartName string + Version string +} + +// MutexKey returns mutex key of the chart version +func (info *ChartVersionInfo) MutexKey(suffix ...string) string { + a := []string{"quota", info.Namespace, "chart", info.ChartName, "version", info.Version} + + return strings.Join(append(a, suffix...), ":") +} + +// ImageInfo ... +type ImageInfo struct { + Repository string + Reference string + ProjectName string + Digest string +} + +// BlobInfo ... +type BlobInfo struct { + ProjectID int64 + ContentType string + Size int64 + Repository string + Digest string + + blobExist bool + blobExistErr error + blobExistOnce sync.Once +} + +// BlobExists returns true when blob exists in the project +func (info *BlobInfo) BlobExists() (bool, error) { + info.blobExistOnce.Do(func() { + info.blobExist, info.blobExistErr = dao.HasBlobInProject(info.ProjectID, info.Digest) + }) + + return info.blobExist, info.blobExistErr +} + +// MutexKey returns mutex key of the blob +func (info *BlobInfo) MutexKey(suffix ...string) string { + projectName, _ := utils.ParseRepository(info.Repository) + a := []string{"quota", projectName, "blob", info.Digest} + + return strings.Join(append(a, suffix...), ":") +} + +// ManifestInfo ... 
+type ManifestInfo struct { + // basic information of a manifest + ProjectID int64 + Repository string + Tag string + Digest string + + References []distribution.Descriptor + Descriptor distribution.Descriptor + + // manifestExist is to index the existing of the manifest in DB by (repository, digest) + manifestExist bool + manifestExistErr error + manifestExistOnce sync.Once + + // artifact the artifact indexed by (repository, tag) in DB + artifact *models.Artifact + artifactErr error + artifactOnce sync.Once + + // ExclusiveBlobs include the blobs that belong to the manifest only + // and exclude the blobs that shared by other manifests in the same repo(project/repository). + ExclusiveBlobs []*models.Blob +} + +// MutexKey returns mutex key of the manifest +func (info *ManifestInfo) MutexKey(suffix ...string) string { + projectName, _ := utils.ParseRepository(info.Repository) + var a []string + + if info.Tag != "" { + // tag not empty happened in PUT /v2//manifests/ + // lock by to tag to compute the count resource required by quota + a = []string{"quota", projectName, "manifest", info.Tag} + } else { + a = []string{"quota", projectName, "manifest", info.Digest} + } + + return strings.Join(append(a, suffix...), ":") +} + +// BlobMutexKey returns mutex key of the blob in manifest +func (info *ManifestInfo) BlobMutexKey(blob *models.Blob, suffix ...string) string { + projectName, _ := utils.ParseRepository(info.Repository) + a := []string{"quota", projectName, "blob", blob.Digest} + + return strings.Join(append(a, suffix...), ":") +} + +// GetBlobsNotInProject returns blobs of the manifest which not in the project +func (info *ManifestInfo) GetBlobsNotInProject() ([]*models.Blob, error) { + var digests []string + for _, reference := range info.References { + digests = append(digests, reference.Digest.String()) + } + + blobs, err := dao.GetBlobsNotInProject(info.ProjectID, digests...) 
+ if err != nil { + return nil, err + } + + return blobs, nil +} + +func (info *ManifestInfo) fetchArtifact() (*models.Artifact, error) { + info.artifactOnce.Do(func() { + info.artifact, info.artifactErr = dao.GetArtifact(info.Repository, info.Tag) + }) + + return info.artifact, info.artifactErr +} + +// IsNewTag returns true if the tag of the manifest not exists in project +func (info *ManifestInfo) IsNewTag() bool { + artifact, _ := info.fetchArtifact() + + return artifact == nil +} + +// Artifact returns artifact of the manifest +func (info *ManifestInfo) Artifact() *models.Artifact { + result := &models.Artifact{ + PID: info.ProjectID, + Repo: info.Repository, + Tag: info.Tag, + Digest: info.Digest, + Kind: "Docker-Image", + } + + if artifact, _ := info.fetchArtifact(); artifact != nil { + result.ID = artifact.ID + result.CreationTime = artifact.CreationTime + result.PushTime = time.Now() + } + + return result +} + +// ManifestExists returns true if manifest exist in repository +func (info *ManifestInfo) ManifestExists() (bool, error) { + info.manifestExistOnce.Do(func() { + total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{ + PID: info.ProjectID, + Repo: info.Repository, + Digest: info.Digest, + }) + + info.manifestExist = total > 0 + info.manifestExistErr = err + }) + + return info.manifestExist, info.manifestExistErr +} + +// JSONError wraps a concrete Code and Message, it's readable for docker deamon. +type JSONError struct { + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` + Detail string `json:"detail,omitempty"` +} + +// MarshalError ... 
+func MarshalError(code, msg string) string { + var tmpErrs struct { + Errors []JSONError `json:"errors,omitempty"` + } + tmpErrs.Errors = append(tmpErrs.Errors, JSONError{ + Code: code, + Message: msg, + Detail: msg, + }) + str, err := json.Marshal(tmpErrs) + if err != nil { + log.Debugf("failed to marshal json error, %v", err) + return msg + } + return string(str) +} + +// MatchManifestURL ... +func MatchManifestURL(req *http.Request) (bool, string, string) { + s := manifestURLRe.FindStringSubmatch(req.URL.Path) + if len(s) == 3 { + s[1] = strings.TrimSuffix(s[1], "/") + return true, s[1], s[2] + } + return false, "", "" +} + +// MatchPullManifest checks if the request looks like a request to pull manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values +func MatchPullManifest(req *http.Request) (bool, string, string) { + if req.Method != http.MethodGet { + return false, "", "" + } + return MatchManifestURL(req) +} + +// MatchPushManifest checks if the request looks like a request to push manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values +func MatchPushManifest(req *http.Request) (bool, string, string) { + if req.Method != http.MethodPut { + return false, "", "" + } + return MatchManifestURL(req) +} + +// MatchDeleteManifest checks if the request +func MatchDeleteManifest(req *http.Request) (match bool, repository string, reference string) { + if req.Method != http.MethodDelete { + return + } + + match, repository, reference = MatchManifestURL(req) + if _, err := digest.Parse(reference); err != nil { + // Delete manifest only accept digest as reference + match = false + + return + } + + return +} + +// CopyResp ... 
+func CopyResp(rec *httptest.ResponseRecorder, rw http.ResponseWriter) { + for k, v := range rec.Header() { + rw.Header()[k] = v + } + rw.WriteHeader(rec.Result().StatusCode) + rw.Write(rec.Body.Bytes()) +} + +// PolicyChecker checks the policy of a project by project name, to determine if it's needed to check the image's status under this project. +type PolicyChecker interface { + // contentTrustEnabled returns whether a project has enabled content trust. + ContentTrustEnabled(name string) bool + // vulnerablePolicy returns whether a project has enabled vulnerable, and the project's severity. + VulnerablePolicy(name string) (bool, models.Severity, models.CVEWhitelist) +} + +// PmsPolicyChecker ... +type PmsPolicyChecker struct { + pm promgr.ProjectManager +} + +// ContentTrustEnabled ... +func (pc PmsPolicyChecker) ContentTrustEnabled(name string) bool { + project, err := pc.pm.Get(name) + if err != nil { + log.Errorf("Unexpected error when getting the project, error: %v", err) + return true + } + return project.ContentTrustEnabled() +} + +// VulnerablePolicy ... 
+func (pc PmsPolicyChecker) VulnerablePolicy(name string) (bool, models.Severity, models.CVEWhitelist) { + project, err := pc.pm.Get(name) + wl := models.CVEWhitelist{} + if err != nil { + log.Errorf("Unexpected error when getting the project, error: %v", err) + return true, models.SevUnknown, wl + } + mgr := whitelist.NewDefaultManager() + if project.ReuseSysCVEWhitelist() { + w, err := mgr.GetSys() + if err != nil { + return project.VulPrevented(), clair.ParseClairSev(project.Severity()), wl + } + wl = *w + } else { + w, err := mgr.Get(project.ProjectID) + if err != nil { + return project.VulPrevented(), clair.ParseClairSev(project.Severity()), wl + } + wl = *w + } + return project.VulPrevented(), clair.ParseClairSev(project.Severity()), wl + +} + +// NewPMSPolicyChecker returns an instance of an pmsPolicyChecker +func NewPMSPolicyChecker(pm promgr.ProjectManager) PolicyChecker { + return &PmsPolicyChecker{ + pm: pm, + } +} + +// GetPolicyChecker ... +func GetPolicyChecker() PolicyChecker { + return NewPMSPolicyChecker(config.GlobalProjectMgr) +} + +// GetRegRedisCon ... 
+func GetRegRedisCon() (redis.Conn, error) { + // FOR UT + if os.Getenv("UTTEST") == "true" { + return redis.Dial( + "tcp", + fmt.Sprintf("%s:%d", os.Getenv("REDIS_HOST"), 6379), + redis.DialConnectTimeout(DialConnectionTimeout), + redis.DialReadTimeout(DialReadTimeout), + redis.DialWriteTimeout(DialWriteTimeout), + ) + } + return redis.DialURL( + config.GetRedisOfRegURL(), + redis.DialConnectTimeout(DialConnectionTimeout), + redis.DialReadTimeout(DialReadTimeout), + redis.DialWriteTimeout(DialWriteTimeout), + ) +} + +// BlobInfoFromContext returns blob info from context +func BlobInfoFromContext(ctx context.Context) (*BlobInfo, bool) { + info, ok := ctx.Value(blobInfoKey).(*BlobInfo) + return info, ok +} + +// ChartVersionInfoFromContext returns chart info from context +func ChartVersionInfoFromContext(ctx context.Context) (*ChartVersionInfo, bool) { + info, ok := ctx.Value(chartVersionInfoKey).(*ChartVersionInfo) + return info, ok +} + +// ImageInfoFromContext returns image info from context +func ImageInfoFromContext(ctx context.Context) (*ImageInfo, bool) { + info, ok := ctx.Value(ImageInfoCtxKey).(*ImageInfo) + return info, ok +} + +// ManifestInfoFromContext returns manifest info from context +func ManifestInfoFromContext(ctx context.Context) (*ManifestInfo, bool) { + info, ok := ctx.Value(manifestInfoKey).(*ManifestInfo) + return info, ok +} + +// NewBlobInfoContext returns context with blob info +func NewBlobInfoContext(ctx context.Context, info *BlobInfo) context.Context { + return context.WithValue(ctx, blobInfoKey, info) +} + +// NewChartVersionInfoContext returns context with blob info +func NewChartVersionInfoContext(ctx context.Context, info *ChartVersionInfo) context.Context { + return context.WithValue(ctx, chartVersionInfoKey, info) +} + +// NewImageInfoContext returns context with image info +func NewImageInfoContext(ctx context.Context, info *ImageInfo) context.Context { + return context.WithValue(ctx, ImageInfoCtxKey, info) +} + +// 
NewManifestInfoContext returns context with manifest info +func NewManifestInfoContext(ctx context.Context, info *ManifestInfo) context.Context { + return context.WithValue(ctx, manifestInfoKey, info) +} + +// ParseManifestInfo prase manifest from request +func ParseManifestInfo(req *http.Request) (*ManifestInfo, error) { + match, repository, reference := MatchManifestURL(req) + if !match { + return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path) + } + + var tag string + if _, err := digest.Parse(reference); err != nil { + tag = reference + } + + mediaType := req.Header.Get("Content-Type") + if mediaType != schema1.MediaTypeManifest && + mediaType != schema1.MediaTypeSignedManifest && + mediaType != schema2.MediaTypeManifest { + return nil, fmt.Errorf("unsupported content type for manifest: %s", mediaType) + } + + if req.Body == nil { + return nil, fmt.Errorf("body missing") + } + + body, err := ioutil.ReadAll(req.Body) + if err != nil { + log.Warningf("Error occurred when to copy manifest body %v", err) + return nil, err + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + + manifest, desc, err := distribution.UnmarshalManifest(mediaType, body) + if err != nil { + log.Warningf("Error occurred when to Unmarshal Manifest %v", err) + return nil, err + } + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + return &ManifestInfo{ + ProjectID: project.ProjectID, + Repository: repository, + Tag: tag, + Digest: desc.Digest.String(), + References: manifest.References(), + Descriptor: desc, + }, nil +} + +// ParseManifestInfoFromPath prase manifest from request path +func ParseManifestInfoFromPath(req *http.Request) (*ManifestInfo, error) { + match, repository, reference := MatchManifestURL(req) + if !match 
{ + return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path) + } + + projectName, _ := utils.ParseRepository(repository) + project, err := dao.GetProjectByName(projectName) + if err != nil { + return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err) + } + if project == nil { + return nil, fmt.Errorf("project %s not found", projectName) + } + + info := &ManifestInfo{ + ProjectID: project.ProjectID, + Repository: repository, + } + + dgt, err := digest.Parse(reference) + if err != nil { + info.Tag = reference + } else { + info.Digest = dgt.String() + } + + return info, nil +} diff --git a/src/core/proxy/interceptor_test.go b/src/core/middlewares/util/util_test.go similarity index 54% rename from src/core/proxy/interceptor_test.go rename to src/core/middlewares/util/util_test.go index ab73f27bf..e02229ad9 100644 --- a/src/core/proxy/interceptor_test.go +++ b/src/core/middlewares/util/util_test.go @@ -1,30 +1,49 @@ -package proxy +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util import ( - "github.com/goharbor/harbor/src/common" - "github.com/goharbor/harbor/src/common/models" - notarytest "github.com/goharbor/harbor/src/common/utils/notary/test" - testutils "github.com/goharbor/harbor/src/common/utils/test" - "github.com/goharbor/harbor/src/core/config" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - + "bytes" + "encoding/json" "net/http" "net/http/httptest" "os" + "reflect" "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + notarytest "github.com/goharbor/harbor/src/common/utils/notary/test" + testutils "github.com/goharbor/harbor/src/common/utils/test" + "github.com/goharbor/harbor/src/core/config" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var endpoint = "10.117.4.142" var notaryServer *httptest.Server -var admiralEndpoint = "http://127.0.0.1:8282" -var token = "" - func TestMain(m *testing.M) { + testutils.InitDatabaseFromEnv() notaryServer = notarytest.NewNotaryServer(endpoint) defer notaryServer.Close() - NotaryEndpoint = notaryServer.URL var defaultConfig = map[string]interface{}{ common.ExtEndpoint: "https://" + endpoint, common.WithNotary: true, @@ -125,22 +144,6 @@ func TestMatchPushManifest(t *testing.T) { assert.Equal("14.04", tag8) } -func TestMatchListRepos(t *testing.T) { - assert := assert.New(t) - req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/_catalog", nil) - res1 := MatchListRepos(req1) - assert.False(res1, "%s %v is not a request to list repos", req1.Method, req1.URL) - - req2, _ := http.NewRequest("GET", "http://127.0.0.1:5000/v2/_catalog", nil) - res2 := MatchListRepos(req2) - assert.True(res2, "%s %v is a request to list repos", req2.Method, 
req2.URL) - - req3, _ := http.NewRequest("GET", "https://192.168.0.5:443/v1/_catalog", nil) - res3 := MatchListRepos(req3) - assert.False(res3, "%s %v is not a request to pull manifest", req3.Method, req3.URL) - -} - func TestPMSPolicyChecker(t *testing.T) { var defaultConfigAdmiral = map[string]interface{}{ common.ExtEndpoint: "https://" + endpoint, @@ -157,7 +160,6 @@ func TestPMSPolicyChecker(t *testing.T) { if err := config.Init(); err != nil { panic(err) } - testutils.InitDatabaseFromEnv() config.Upload(defaultConfigAdmiral) @@ -166,9 +168,10 @@ func TestPMSPolicyChecker(t *testing.T) { Name: name, OwnerID: 1, Metadata: map[string]string{ - models.ProMetaEnableContentTrust: "true", - models.ProMetaPreventVul: "true", - models.ProMetaSeverity: "low", + models.ProMetaEnableContentTrust: "true", + models.ProMetaPreventVul: "true", + models.ProMetaSeverity: "low", + models.ProMetaReuseSysCVEWhitelist: "false", }, }) require.Nil(t, err) @@ -178,26 +181,12 @@ func TestPMSPolicyChecker(t *testing.T) { } }(id) - contentTrustFlag := getPolicyChecker().contentTrustEnabled("project_for_test_get_sev_low") + contentTrustFlag := GetPolicyChecker().ContentTrustEnabled("project_for_test_get_sev_low") assert.True(t, contentTrustFlag) - projectVulnerableEnabled, projectVulnerableSeverity := getPolicyChecker().vulnerablePolicy("project_for_test_get_sev_low") + projectVulnerableEnabled, projectVulnerableSeverity, wl := GetPolicyChecker().VulnerablePolicy("project_for_test_get_sev_low") assert.True(t, projectVulnerableEnabled) assert.Equal(t, projectVulnerableSeverity, models.SevLow) -} - -func TestMatchNotaryDigest(t *testing.T) { - assert := assert.New(t) - // The data from common/utils/notary/helper_test.go - img1 := imageInfo{"notary-demo/busybox", "1.0", "notary-demo", "sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7"} - img2 := imageInfo{"notary-demo/busybox", "2.0", "notary-demo", "sha256:12345678"} - - res1, err := matchNotaryDigest(img1) - 
assert.Nil(err, "Unexpected error: %v, image: %#v", err, img1) - assert.True(res1) - - res2, err := matchNotaryDigest(img2) - assert.Nil(err, "Unexpected error: %v, image: %#v, take 2", err, img2) - assert.False(res2) + assert.Empty(t, wl.Items) } func TestCopyResp(t *testing.T) { @@ -206,21 +195,207 @@ func TestCopyResp(t *testing.T) { rec2 := httptest.NewRecorder() rec1.Header().Set("X-Test", "mytest") rec1.WriteHeader(418) - copyResp(rec1, rec2) + CopyResp(rec1, rec2) assert.Equal(418, rec2.Result().StatusCode) assert.Equal("mytest", rec2.Header().Get("X-Test")) } func TestMarshalError(t *testing.T) { assert := assert.New(t) - js1 := marshalError("PROJECT_POLICY_VIOLATION", "Not Found") + js1 := MarshalError("PROJECT_POLICY_VIOLATION", "Not Found") assert.Equal("{\"errors\":[{\"code\":\"PROJECT_POLICY_VIOLATION\",\"message\":\"Not Found\",\"detail\":\"Not Found\"}]}", js1) - js2 := marshalError("DENIED", "The action is denied") + js2 := MarshalError("DENIED", "The action is denied") assert.Equal("{\"errors\":[{\"code\":\"DENIED\",\"message\":\"The action is denied\",\"detail\":\"The action is denied\"}]}", js2) } -func TestIsDigest(t *testing.T) { - assert := assert.New(t) - assert.False(isDigest("latest")) - assert.True(isDigest("sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7")) +func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest { + manifest := schema2.Manifest{ + Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest}, + Config: distribution.Descriptor{ + MediaType: schema2.MediaTypeImageConfig, + Size: configSize, + Digest: digest.FromString(utils.GenerateRandomString()), + }, + } + + for _, size := range layerSizes { + manifest.Layers = append(manifest.Layers, distribution.Descriptor{ + MediaType: schema2.MediaTypeLayer, + Size: size, + Digest: digest.FromString(utils.GenerateRandomString()), + }) + } + + return manifest +} + +func getDescriptor(manifest schema2.Manifest) 
distribution.Descriptor { + buf, _ := json.Marshal(manifest) + _, desc, _ := distribution.UnmarshalManifest(manifest.Versioned.MediaType, buf) + return desc +} + +func TestParseManifestInfo(t *testing.T) { + manifest := makeManifest(1, []int64{2, 3, 4}) + + tests := []struct { + name string + req func() *http.Request + want *ManifestInfo + wantErr bool + }{ + { + "ok", + func() *http.Request { + buf, _ := json.Marshal(manifest) + req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifests/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + return req + }, + &ManifestInfo{ + ProjectID: 1, + Repository: "library/photon", + Tag: "latest", + Digest: getDescriptor(manifest).Digest.String(), + References: manifest.References(), + Descriptor: getDescriptor(manifest), + }, + false, + }, + { + "bad content type", + func() *http.Request { + buf, _ := json.Marshal(manifest) + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", "application/json") + + return req + }, + nil, + true, + }, + { + "bad manifest", + func() *http.Request { + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader([]byte(""))) + req.Header.Add("Content-Type", schema2.MediaTypeManifest) + + return req + }, + nil, + true, + }, + { + "body missing", + func() *http.Request { + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", nil) + req.Header.Add("Content-Type", schema2.MediaTypeManifest) + + return req + }, + nil, + true, + }, + { + "project not found", + func() *http.Request { + + buf, _ := json.Marshal(manifest) + + req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + return req + }, + nil, + true, + }, + { + "url not match", + func() *http.Request { + buf, _ := json.Marshal(manifest) + req, 
_ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifest/latest", bytes.NewReader(buf)) + req.Header.Add("Content-Type", manifest.MediaType) + + return req + }, + nil, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseManifestInfo(tt.req()) + if (err != nil) != tt.wantErr { + t.Errorf("ParseManifestInfo() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseManifestInfo() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestParseManifestInfoFromPath(t *testing.T) { + mustRequest := func(method, url string) *http.Request { + req, _ := http.NewRequest(method, url, nil) + return req + } + + type args struct { + req *http.Request + } + tests := []struct { + name string + args args + want *ManifestInfo + wantErr bool + }{ + { + "ok for digest", + args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059")}, + &ManifestInfo{ + ProjectID: 1, + Repository: "library/photon", + Digest: "sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059", + }, + false, + }, + { + "ok for tag", + args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/latest")}, + &ManifestInfo{ + ProjectID: 1, + Repository: "library/photon", + Tag: "latest", + }, + false, + }, + { + "project not found", + args{mustRequest(http.MethodDelete, "/v2/notfound/photon/manifests/latest")}, + nil, + true, + }, + { + "url not match", + args{mustRequest(http.MethodDelete, "/v2/library/photon/manifest/latest")}, + nil, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseManifestInfoFromPath(tt.args.req) + if (err != nil) != tt.wantErr { + t.Errorf("ParseManifestInfoFromPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseManifestInfoFromPath() = %v, want %v", got, tt.want) 
+ } + }) + } } diff --git a/src/core/middlewares/vulnerable/handler.go b/src/core/middlewares/vulnerable/handler.go new file mode 100644 index 000000000..67d1b97ce --- /dev/null +++ b/src/core/middlewares/vulnerable/handler.go @@ -0,0 +1,80 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vulnerable + +import ( + "fmt" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/middlewares/util" + "github.com/goharbor/harbor/src/pkg/scan" + "net/http" +) + +type vulnerableHandler struct { + next http.Handler +} + +// New ... +func New(next http.Handler) http.Handler { + return &vulnerableHandler{ + next: next, + } +} + +// ServeHTTP ... 
+func (vh vulnerableHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + imgRaw := req.Context().Value(util.ImageInfoCtxKey) + if imgRaw == nil || !config.WithClair() { + vh.next.ServeHTTP(rw, req) + return + } + img, _ := req.Context().Value(util.ImageInfoCtxKey).(util.ImageInfo) + if img.Digest == "" { + vh.next.ServeHTTP(rw, req) + return + } + projectVulnerableEnabled, projectVulnerableSeverity, wl := util.GetPolicyChecker().VulnerablePolicy(img.ProjectName) + if !projectVulnerableEnabled { + vh.next.ServeHTTP(rw, req) + return + } + vl, err := scan.VulnListByDigest(img.Digest) + if err != nil { + log.Errorf("Failed to get the vulnerability list, error: %v", err) + http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", "Failed to get vulnerabilities."), http.StatusPreconditionFailed) + return + } + filtered := vl.ApplyWhitelist(wl) + msg := vh.filterMsg(img, filtered) + log.Info(msg) + if int(vl.Severity()) >= int(projectVulnerableSeverity) { + log.Debugf("the image severity: %q is higher then project setting: %q, failing the response.", vl.Severity(), projectVulnerableSeverity) + http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("The severity of vulnerability of the image: %q is equal or higher than the threshold in project setting: %q.", vl.Severity(), projectVulnerableSeverity)), http.StatusPreconditionFailed) + return + } + vh.next.ServeHTTP(rw, req) +} + +func (vh vulnerableHandler) filterMsg(img util.ImageInfo, filtered scan.VulnerabilityList) string { + filterMsg := fmt.Sprintf("Image: %s/%s:%s, digest: %s, vulnerabilities fitered by whitelist:", img.ProjectName, img.Repository, img.Reference, img.Digest) + if len(filtered) == 0 { + filterMsg = fmt.Sprintf("%s none.", filterMsg) + } + for _, v := range filtered { + filterMsg = fmt.Sprintf("%s ID: %s, severity: %s;", filterMsg, v.ID, v.Severity) + } + return filterMsg +} diff --git a/src/core/notifier/event/event.go b/src/core/notifier/event/event.go new file mode 
100644 index 000000000..088a8d2e7 --- /dev/null +++ b/src/core/notifier/event/event.go @@ -0,0 +1,154 @@ +package event + +import ( + "time" + + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/notifier" + "github.com/goharbor/harbor/src/core/notifier/model" + notifyModel "github.com/goharbor/harbor/src/pkg/notification/model" + "github.com/pkg/errors" +) + +// Event to publish +type Event struct { + Topic string + Data interface{} +} + +// Metadata is the event raw data to be processed +type Metadata interface { + Resolve(event *Event) error +} + +// ImageDelMetaData defines images deleting related event data +type ImageDelMetaData struct { + Project *models.Project + Tags []string + OccurAt time.Time + Operator string + RepoName string +} + +// Resolve image deleting metadata into common image event +func (i *ImageDelMetaData) Resolve(evt *Event) error { + data := &model.ImageEvent{ + EventType: notifyModel.EventTypeDeleteImage, + Project: i.Project, + OccurAt: i.OccurAt, + Operator: i.Operator, + RepoName: i.RepoName, + } + for _, t := range i.Tags { + res := &model.ImgResource{Tag: t} + data.Resource = append(data.Resource, res) + } + evt.Topic = model.DeleteImageTopic + evt.Data = data + return nil +} + +// ImagePushMetaData defines images pushing related event data +type ImagePushMetaData struct { + Project *models.Project + Tag string + Digest string + OccurAt time.Time + Operator string + RepoName string +} + +// Resolve image pushing metadata into common image event +func (i *ImagePushMetaData) Resolve(evt *Event) error { + data := &model.ImageEvent{ + EventType: notifyModel.EventTypePushImage, + Project: i.Project, + OccurAt: i.OccurAt, + Operator: i.Operator, + RepoName: i.RepoName, + Resource: []*model.ImgResource{ + { + Tag: i.Tag, + Digest: i.Digest, + }, + }, + } + + evt.Topic = model.PushImageTopic + evt.Data = data + return nil +} + +// ImagePullMetaData 
defines images pulling related event data +type ImagePullMetaData struct { + Project *models.Project + Tag string + Digest string + OccurAt time.Time + Operator string + RepoName string +} + +// Resolve image pulling metadata into common image event +func (i *ImagePullMetaData) Resolve(evt *Event) error { + data := &model.ImageEvent{ + EventType: notifyModel.EventTypePullImage, + Project: i.Project, + OccurAt: i.OccurAt, + Operator: i.Operator, + RepoName: i.RepoName, + Resource: []*model.ImgResource{ + { + Tag: i.Tag, + Digest: i.Digest, + }, + }, + } + + evt.Topic = model.PullImageTopic + evt.Data = data + return nil +} + +// HookMetaData defines hook notification related event data +type HookMetaData struct { + PolicyID int64 + EventType string + Target *models.EventTarget + Payload *model.Payload +} + +// Resolve hook metadata into hook event +func (h *HookMetaData) Resolve(evt *Event) error { + data := &model.HookEvent{ + PolicyID: h.PolicyID, + EventType: h.EventType, + Target: h.Target, + Payload: h.Payload, + } + + evt.Topic = h.Target.Type + evt.Data = data + return nil +} + +// Build an event by metadata +func (e *Event) Build(metadata ...Metadata) error { + for _, md := range metadata { + if err := md.Resolve(e); err != nil { + log.Debugf("failed to resolve event metadata: %v", md) + return errors.Wrap(err, "failed to resolve event metadata") + } + } + return nil +} + +// Publish an event +func (e *Event) Publish() error { + if err := notifier.Publish(e.Topic, e.Data); err != nil { + log.Debugf("failed to publish topic %s with event: %v", e.Topic, e.Data) + return errors.Wrap(err, "failed to publish event") + } + return nil +} diff --git a/src/core/notifier/event/event_test.go b/src/core/notifier/event/event_test.go new file mode 100644 index 000000000..21e0d8d23 --- /dev/null +++ b/src/core/notifier/event/event_test.go @@ -0,0 +1,212 @@ +package event + +import ( + "testing" + "time" + + "github.com/goharbor/harbor/src/common/models" + notifierModel 
"github.com/goharbor/harbor/src/core/notifier/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestImagePushEvent_Build(t *testing.T) { + type args struct { + imgPushMetadata *ImagePushMetaData + hookMetadata *HookMetaData + } + + tests := []struct { + name string + args args + wantErr bool + want *Event + }{ + { + name: "Build Image Push Event", + args: args{ + imgPushMetadata: &ImagePushMetaData{ + Project: &models.Project{ProjectID: 1, Name: "library"}, + Tag: "v1.0", + Digest: "abcd", + OccurAt: time.Now(), + Operator: "admin", + RepoName: "library/alpine", + }, + }, + want: &Event{ + Topic: notifierModel.PushImageTopic, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{} + err := event.Build(tt.args.imgPushMetadata) + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + assert.Equal(t, tt.want.Topic, event.Topic) + }) + } +} + +func TestImagePullEvent_Build(t *testing.T) { + type args struct { + imgPullMetadata *ImagePullMetaData + } + + tests := []struct { + name string + args args + wantErr bool + want *Event + }{ + { + name: "Build Image Pull Event", + args: args{ + imgPullMetadata: &ImagePullMetaData{ + Project: &models.Project{ProjectID: 1, Name: "library"}, + Tag: "v1.0", + Digest: "abcd", + OccurAt: time.Now(), + Operator: "admin", + RepoName: "library/alpine", + }, + }, + want: &Event{ + Topic: notifierModel.PullImageTopic, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{} + err := event.Build(tt.args.imgPullMetadata) + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + assert.Equal(t, tt.want.Topic, event.Topic) + }) + } +} + +func TestImageDelEvent_Build(t *testing.T) { + type args struct { + imgDelMetadata *ImageDelMetaData + } + + tests := []struct { + name string + args args + wantErr bool + want *Event + }{ + { + name: "Build Image Delete Event", + args: 
args{ + imgDelMetadata: &ImageDelMetaData{ + Project: &models.Project{ProjectID: 1, Name: "library"}, + Tags: []string{"v1.0"}, + OccurAt: time.Now(), + Operator: "admin", + RepoName: "library/alpine", + }, + }, + want: &Event{ + Topic: notifierModel.DeleteImageTopic, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{} + err := event.Build(tt.args.imgDelMetadata) + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + assert.Equal(t, tt.want.Topic, event.Topic) + }) + } +} + +func TestHookEvent_Build(t *testing.T) { + type args struct { + hookMetadata *HookMetaData + } + + tests := []struct { + name string + args args + wantErr bool + want *Event + }{ + { + name: "Build HTTP Hook Event", + args: args{ + hookMetadata: &HookMetaData{ + PolicyID: 1, + EventType: "pushImage", + Target: &models.EventTarget{ + Type: "http", + Address: "http://127.0.0.1", + }, + Payload: nil, + }, + }, + want: &Event{ + Topic: notifierModel.WebhookTopic, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{} + err := event.Build(tt.args.hookMetadata) + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + assert.Equal(t, tt.want.Topic, event.Topic) + }) + } +} + +func TestEvent_Publish(t *testing.T) { + type args struct { + event *Event + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "Publish Error 1", + args: args{ + event: &Event{ + Topic: notifierModel.WebhookTopic, + Data: nil, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.args.event.Publish() + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + }) + } +} diff --git a/src/core/notifier/handler/notification/http_handler.go b/src/core/notifier/handler/notification/http_handler.go new file mode 100755 index 000000000..9795a7c2b --- /dev/null +++ 
b/src/core/notifier/handler/notification/http_handler.go @@ -0,0 +1,59 @@ +package notification + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/core/notifier/model" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/notification" +) + +// HTTPHandler preprocess http event data and start the hook processing +type HTTPHandler struct { +} + +// Handle handles http event +func (h *HTTPHandler) Handle(value interface{}) error { + if value == nil { + return errors.New("HTTPHandler cannot handle nil value") + } + + event, ok := value.(*model.HookEvent) + if !ok || event == nil { + return errors.New("invalid notification http event") + } + + return h.process(event) +} + +// IsStateful ... +func (h *HTTPHandler) IsStateful() bool { + return false +} + +func (h *HTTPHandler) process(event *model.HookEvent) error { + j := &models.JobData{ + Metadata: &models.JobMetadata{ + JobKind: job.KindGeneric, + }, + } + j.Name = job.WebhookJob + + payload, err := json.Marshal(event.Payload) + if err != nil { + return fmt.Errorf("marshal from payload %v failed: %v", event.Payload, err) + } + + j.Parameters = map[string]interface{}{ + "payload": string(payload), + "address": event.Target.Address, + // Users can define a auth header in http statement in notification(webhook) policy. + // So it will be sent in header in http request. 
+ "auth_header": event.Target.AuthHeader, + "skip_cert_verify": event.Target.SkipCertVerify, + } + return notification.HookManager.StartHook(event, j) +} diff --git a/src/core/notifier/handler/notification/http_handler_test.go b/src/core/notifier/handler/notification/http_handler_test.go new file mode 100644 index 000000000..c7d5ef3ae --- /dev/null +++ b/src/core/notifier/handler/notification/http_handler_test.go @@ -0,0 +1,97 @@ +package notification + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/goharbor/harbor/src/common/job/models" + cModels "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/notifier/event" + "github.com/goharbor/harbor/src/core/notifier/model" + "github.com/goharbor/harbor/src/pkg/notification" + "github.com/stretchr/testify/require" +) + +type fakedHookManager struct { +} + +func (f *fakedHookManager) StartHook(event *model.HookEvent, job *models.JobData) error { + return nil +} + +func TestHTTPHandler_Handle(t *testing.T) { + hookMgr := notification.HookManager + defer func() { + notification.HookManager = hookMgr + }() + notification.HookManager = &fakedHookManager{} + + handler := &HTTPHandler{} + + type args struct { + event *event.Event + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "HTTPHandler_Handle Want Error 1", + args: args{ + event: &event.Event{ + Topic: "http", + Data: nil, + }, + }, + wantErr: true, + }, + { + name: "HTTPHandler_Handle Want Error 2", + args: args{ + event: &event.Event{ + Topic: "http", + Data: &model.ImageEvent{}, + }, + }, + wantErr: true, + }, + { + name: "HTTPHandler_Handle 1", + args: args{ + event: &event.Event{ + Topic: "http", + Data: &model.HookEvent{ + PolicyID: 1, + EventType: "pushImage", + Target: &cModels.EventTarget{ + Type: "http", + Address: "http://127.0.0.1:8080", + }, + Payload: &model.Payload{ + OccurAt: time.Now().Unix(), + }, + }, + }, + }, + wantErr: false, + }, + } + + for 
_, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := handler.Handle(tt.args.event.Data) + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + }) + } +} + +func TestHTTPHandler_IsStateful(t *testing.T) { + handler := &HTTPHandler{} + assert.False(t, handler.IsStateful()) +} diff --git a/src/core/notifier/handler/notification/image_handler.go b/src/core/notifier/handler/notification/image_handler.go new file mode 100644 index 000000000..f9dc12468 --- /dev/null +++ b/src/core/notifier/handler/notification/image_handler.go @@ -0,0 +1,18 @@ +package notification + +// ImagePreprocessHandler preprocess image event data +type ImagePreprocessHandler struct { +} + +// Handle preprocess image event data and then publish hook event +func (h *ImagePreprocessHandler) Handle(value interface{}) error { + if err := preprocessAndSendImageHook(value); err != nil { + return err + } + return nil +} + +// IsStateful ... +func (h *ImagePreprocessHandler) IsStateful() bool { + return false +} diff --git a/src/core/notifier/handler/notification/image_handler_test.go b/src/core/notifier/handler/notification/image_handler_test.go new file mode 100644 index 000000000..ae1696b44 --- /dev/null +++ b/src/core/notifier/handler/notification/image_handler_test.go @@ -0,0 +1,193 @@ +package notification + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/notifier/model" + "github.com/goharbor/harbor/src/pkg/notification" + notificationModel "github.com/goharbor/harbor/src/pkg/notification/model" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +type fakedNotificationPlyMgr struct { +} + +func (f *fakedNotificationPlyMgr) Create(*models.NotificationPolicy) (int64, error) { + return 0, nil +} + +func (f *fakedNotificationPlyMgr) List(id 
int64) ([]*models.NotificationPolicy, error) { + return nil, nil +} + +func (f *fakedNotificationPlyMgr) Get(id int64) (*models.NotificationPolicy, error) { + return nil, nil +} + +func (f *fakedNotificationPlyMgr) GetByNameAndProjectID(string, int64) (*models.NotificationPolicy, error) { + return nil, nil +} + +func (f *fakedNotificationPlyMgr) Update(*models.NotificationPolicy) error { + return nil +} + +func (f *fakedNotificationPlyMgr) Delete(int64) error { + return nil +} + +func (f *fakedNotificationPlyMgr) Test(*models.NotificationPolicy) error { + return nil +} + +func (f *fakedNotificationPlyMgr) GetRelatedPolices(id int64, eventType string) ([]*models.NotificationPolicy, error) { + if id == 1 { + return []*models.NotificationPolicy{ + { + ID: 1, + EventTypes: []string{ + notificationModel.EventTypePullImage, + notificationModel.EventTypePushImage, + }, + Targets: []models.EventTarget{ + { + Type: "http", + Address: "http://127.0.0.1:8080", + }, + }, + }, + }, nil + } + if id == 2 { + return nil, nil + } + return nil, errors.New("") +} + +func TestMain(m *testing.M) { + dao.PrepareTestForPostgresSQL() + os.Exit(m.Run()) +} + +func TestImagePreprocessHandler_Handle(t *testing.T) { + PolicyMgr := notification.PolicyMgr + defer func() { + notification.PolicyMgr = PolicyMgr + }() + notification.PolicyMgr = &fakedNotificationPlyMgr{} + + handler := &ImagePreprocessHandler{} + config.Init() + + type args struct { + data interface{} + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "ImagePreprocessHandler Want Error 1", + args: args{ + data: nil, + }, + wantErr: true, + }, + { + name: "ImagePreprocessHandler Want Error 2", + args: args{ + data: &model.ImageEvent{}, + }, + wantErr: true, + }, + { + name: "ImagePreprocessHandler Want Error 3", + args: args{ + data: &model.ImageEvent{ + Resource: []*model.ImgResource{ + { + Tag: "v1.0", + }, + }, + Project: &models.Project{ + ProjectID: 3, + }, + }, + }, + wantErr: true, + }, + { + 
name: "ImagePreprocessHandler Want Error 4", + args: args{ + data: &model.ImageEvent{ + Resource: []*model.ImgResource{ + { + Tag: "v1.0", + }, + }, + Project: &models.Project{ + ProjectID: 1, + }, + }, + }, + wantErr: true, + }, + // No handlers registered for handling topic http + { + name: "ImagePreprocessHandler Want Error 5", + args: args{ + data: &model.ImageEvent{ + RepoName: "test/alpine", + Resource: []*model.ImgResource{ + { + Tag: "v1.0", + }, + }, + Project: &models.Project{ + ProjectID: 1, + }, + }, + }, + wantErr: true, + }, + { + name: "ImagePreprocessHandler 2", + args: args{ + data: &model.ImageEvent{ + Resource: []*model.ImgResource{ + { + Tag: "v1.0", + }, + }, + Project: &models.Project{ + ProjectID: 2, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := handler.Handle(tt.args.data) + if tt.wantErr { + require.NotNil(t, err, "Error: %s", err) + return + } + assert.Nil(t, err) + }) + } +} + +func TestImagePreprocessHandler_IsStateful(t *testing.T) { + handler := &ImagePreprocessHandler{} + assert.False(t, handler.IsStateful()) +} diff --git a/src/core/notifier/handler/notification/processor.go b/src/core/notifier/handler/notification/processor.go new file mode 100644 index 000000000..513640fd2 --- /dev/null +++ b/src/core/notifier/handler/notification/processor.go @@ -0,0 +1,174 @@ +package notification + +import ( + "errors" + "fmt" + "strings" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/notifier/event" + notifyModel "github.com/goharbor/harbor/src/core/notifier/model" + "github.com/goharbor/harbor/src/pkg/notification" +) + +// getNameFromImgRepoFullName gets image name from repo full name with format `repoName/imageName` +func getNameFromImgRepoFullName(repo string) string { + idx := 
strings.Index(repo, "/") + return repo[idx+1:] +} + +func buildImageResourceURL(extURL, repoName, tag string) (string, error) { + resURL := fmt.Sprintf("%s/%s:%s", extURL, repoName, tag) + return resURL, nil +} + +func constructImagePayload(event *notifyModel.ImageEvent) (*notifyModel.Payload, error) { + repoName := event.RepoName + if repoName == "" { + return nil, fmt.Errorf("invalid %s event with empty repo name", event.EventType) + } + + repoType := models.ProjectPrivate + if event.Project.IsPublic() { + repoType = models.ProjectPublic + } + + imageName := getNameFromImgRepoFullName(repoName) + + payload := ¬ifyModel.Payload{ + Type: event.EventType, + OccurAt: event.OccurAt.Unix(), + EventData: ¬ifyModel.EventData{ + Repository: ¬ifyModel.Repository{ + Name: imageName, + Namespace: event.Project.Name, + RepoFullName: repoName, + RepoType: repoType, + }, + }, + Operator: event.Operator, + } + + repoRecord, err := dao.GetRepositoryByName(repoName) + if err != nil { + log.Errorf("failed to get repository with name %s: %v", repoName, err) + return nil, err + } + // once repo has been delete, cannot ensure to get repo record + if repoRecord == nil { + log.Debugf("cannot find repository info with repo %s", repoName) + } else { + payload.EventData.Repository.DateCreated = repoRecord.CreationTime.Unix() + } + + extURL, err := config.ExtURL() + if err != nil { + return nil, fmt.Errorf("get external endpoint failed: %v", err) + } + + for _, res := range event.Resource { + tag := res.Tag + digest := res.Digest + + if tag == "" { + log.Errorf("invalid notification event with empty tag: %v", event) + continue + } + + resURL, err := buildImageResourceURL(extURL, event.RepoName, tag) + if err != nil { + log.Errorf("get resource URL failed: %v", err) + continue + } + + resource := ¬ifyModel.Resource{ + Tag: tag, + Digest: digest, + ResourceURL: resURL, + } + payload.EventData.Resources = append(payload.EventData.Resources, resource) + } + + return payload, nil +} + +// send 
hook by publishing topic of specified target type(notify type) +func sendHookWithPolicies(policies []*models.NotificationPolicy, payload *notifyModel.Payload, eventType string) error { + for _, ply := range policies { + targets := ply.Targets + for _, target := range targets { + evt := &event.Event{} + hookMetadata := &event.HookMetaData{ + EventType: eventType, + PolicyID: ply.ID, + Payload: payload, + Target: &target, + } + if err := evt.Build(hookMetadata); err != nil { + log.Errorf("failed to build hook notify event metadata: %v", err) + return err + } + if err := evt.Publish(); err != nil { + log.Errorf("failed to publish hook notify event: %v", err) + return err + } + + log.Debugf("published image event %s by topic %s", payload.Type, target.Type) + } + } + return nil +} + +func resolveImageEventData(value interface{}) (*notifyModel.ImageEvent, error) { + imgEvent, ok := value.(*notifyModel.ImageEvent) + if !ok || imgEvent == nil { + return nil, errors.New("invalid image event") + } + + if len(imgEvent.Resource) == 0 { + return nil, fmt.Errorf("empty event resouece data in image event: %v", imgEvent) + } + + return imgEvent, nil +} + +// preprocessAndSendImageHook preprocess image event data and send hook by notification policy target +func preprocessAndSendImageHook(value interface{}) error { + // if global notification configured disabled, return directly + if !config.NotificationEnable() { + log.Debug("notification feature is not enabled") + return nil + } + + imgEvent, err := resolveImageEventData(value) + if err != nil { + return err + } + + policies, err := notification.PolicyMgr.GetRelatedPolices(imgEvent.Project.ProjectID, imgEvent.EventType) + if err != nil { + log.Errorf("failed to find policy for %s event: %v", imgEvent.EventType, err) + return err + } + // if cannot find policy including event type in project, return directly + if len(policies) == 0 { + log.Debugf("cannot find policy for %s event: %v", imgEvent.EventType, imgEvent) + return nil + } 
+ + payload, err := constructImagePayload(imgEvent) + if err != nil { + return err + } + + err = sendHookWithPolicies(policies, payload, imgEvent.EventType) + if err != nil { + return err + } + + return nil + +} diff --git a/src/core/notifier/model/event.go b/src/core/notifier/model/event.go new file mode 100755 index 000000000..67889e751 --- /dev/null +++ b/src/core/notifier/model/event.go @@ -0,0 +1,61 @@ +package model + +import ( + "time" + + "github.com/goharbor/harbor/src/common/models" +) + +// ImageEvent is image related event data to publish +type ImageEvent struct { + EventType string + Project *models.Project + Resource []*ImgResource + OccurAt time.Time + Operator string + RepoName string +} + +// ImgResource include image digest and tag +type ImgResource struct { + Digest string + Tag string +} + +// HookEvent is hook related event data to publish +type HookEvent struct { + PolicyID int64 + EventType string + Target *models.EventTarget + Payload *Payload +} + +// Payload of notification event +type Payload struct { + Type string `json:"type"` + OccurAt int64 `json:"occur_at"` + EventData *EventData `json:"event_data,omitempty"` + Operator string `json:"operator"` +} + +// EventData of notification event payload +type EventData struct { + Resources []*Resource `json:"resources"` + Repository *Repository `json:"repository"` +} + +// Resource describe infos of resource triggered notification +type Resource struct { + Digest string `json:"digest,omitempty"` + Tag string `json:"tag"` + ResourceURL string `json:"resource_url,omitempty"` +} + +// Repository info of notification event +type Repository struct { + DateCreated int64 `json:"date_created,omitempty"` + Name string `json:"name"` + Namespace string `json:"namespace"` + RepoFullName string `json:"repo_full_name"` + RepoType string `json:"repo_type"` +} diff --git a/src/core/notifier/model/topic.go b/src/core/notifier/model/topic.go new file mode 100644 index 000000000..7278858b8 --- /dev/null +++ 
b/src/core/notifier/model/topic.go @@ -0,0 +1,26 @@ +package model + +// Define global topic names +const ( + // PushImageTopic is topic for push image event + PushImageTopic = "OnPushImage" + // PullImageTopic is topic for pull image event + PullImageTopic = "OnPullImage" + // DeleteImageTopic is topic for delete image event + DeleteImageTopic = "OnDeleteImage" + // UploadChartTopic is topic for upload chart event + UploadChartTopic = "OnUploadChart" + // DownloadChartTopic is topic for download chart event + DownloadChartTopic = "OnDownloadChart" + // DeleteChartTopic is topic for delete chart event + DeleteChartTopic = "OnDeleteChart" + // ScanningFailedTopic is topic for scanning failed event + ScanningFailedTopic = "OnScanningFailed" + // ScanningCompletedTopic is topic for scanning completed event + ScanningCompletedTopic = "OnScanningCompleted" + + // WebhookTopic is topic for sending webhook payload + WebhookTopic = "http" + // EmailTopic is topic for sending email payload + EmailTopic = "email" +) diff --git a/src/core/notifier/topic/topics.go b/src/core/notifier/topic/topics.go new file mode 100644 index 000000000..2762da259 --- /dev/null +++ b/src/core/notifier/topic/topics.go @@ -0,0 +1,28 @@ +package topic + +import ( + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/notifier" + "github.com/goharbor/harbor/src/core/notifier/handler/notification" + "github.com/goharbor/harbor/src/core/notifier/model" +) + +// Subscribe topics +func init() { + handlersMap := map[string][]notifier.NotificationHandler{ + model.PushImageTopic: {¬ification.ImagePreprocessHandler{}}, + model.PullImageTopic: {¬ification.ImagePreprocessHandler{}}, + model.DeleteImageTopic: {¬ification.ImagePreprocessHandler{}}, + model.WebhookTopic: {¬ification.HTTPHandler{}}, + } + + for t, handlers := range handlersMap { + for _, handler := range handlers { + if err := notifier.Subscribe(t, handler); err != nil { + log.Errorf("failed to subscribe topic 
%s: %v", t, err) + continue + } + log.Debugf("topic %s is subscribed", t) + } + } +} diff --git a/src/core/notifier/topics.go b/src/core/notifier/topics.go deleted file mode 100644 index 23aca94cf..000000000 --- a/src/core/notifier/topics.go +++ /dev/null @@ -1,11 +0,0 @@ -package notifier - -import ( - "github.com/goharbor/harbor/src/common" -) - -// Define global topic names -const ( - // ScanAllPolicyTopic is for notifying the change of scanning all policy. - ScanAllPolicyTopic = common.ScanAllPolicy -) diff --git a/src/core/promgr/pmsdriver/local/local.go b/src/core/promgr/pmsdriver/local/local.go index b02b19cbd..4706f3f43 100644 --- a/src/core/promgr/pmsdriver/local/local.go +++ b/src/core/promgr/pmsdriver/local/local.go @@ -20,7 +20,6 @@ import ( "time" "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/dao/group" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils" errutil "github.com/goharbor/harbor/src/common/utils/error" @@ -132,19 +131,16 @@ func (d *driver) Update(projectIDOrName interface{}, func (d *driver) List(query *models.ProjectQueryParam) (*models.ProjectQueryResult, error) { var total int64 var projects []*models.Project - var groupDNCondition string - - // List with LDAP group projects + var groupIDs []int if query != nil && query.Member != nil { - groupDNCondition = group.GetGroupDNQueryCondition(query.Member.GroupList) + groupIDs = query.Member.GroupIDs } - - count, err := dao.GetTotalGroupProjects(groupDNCondition, query) + count, err := dao.GetTotalGroupProjects(groupIDs, query) if err != nil { return nil, err } total = int64(count) - projects, err = dao.GetGroupProjects(groupDNCondition, query) + projects, err = dao.GetGroupProjects(groupIDs, query) if err != nil { return nil, err } diff --git a/src/core/promgr/promgr.go b/src/core/promgr/promgr.go index b2de26ff3..3ac8f6ca8 100644 --- a/src/core/promgr/promgr.go +++ b/src/core/promgr/promgr.go @@ -16,6 +16,7 @@ 
package promgr import ( "fmt" + "github.com/goharbor/harbor/src/pkg/scan/whitelist" "strconv" "github.com/goharbor/harbor/src/common/models" @@ -44,6 +45,7 @@ type defaultProjectManager struct { pmsDriver pmsdriver.PMSDriver metaMgrEnabled bool // if metaMgrEnabled is enabled, metaMgr will be used to CURD metadata metaMgr metamgr.ProjectMetadataManager + whitelistMgr whitelist.Manager } // NewDefaultProjectManager returns an instance of defaultProjectManager, @@ -56,6 +58,7 @@ func NewDefaultProjectManager(driver pmsdriver.PMSDriver, metaMgrEnabled bool) P } if metaMgrEnabled { mgr.metaMgr = metamgr.NewDefaultProjectMetadataManager() + mgr.whitelistMgr = whitelist.NewDefaultManager() } return mgr } @@ -77,6 +80,11 @@ func (d *defaultProjectManager) Get(projectIDOrName interface{}) (*models.Projec for k, v := range meta { project.Metadata[k] = v } + wl, err := d.whitelistMgr.Get(project.ProjectID) + if err != nil { + return nil, err + } + project.CVEWhitelist = *wl } return project, nil } @@ -85,9 +93,12 @@ func (d *defaultProjectManager) Create(project *models.Project) (int64, error) { if err != nil { return 0, err } - if len(project.Metadata) > 0 && d.metaMgrEnabled { - if err = d.metaMgr.Add(id, project.Metadata); err != nil { - log.Errorf("failed to add metadata for project %s: %v", project.Name, err) + if d.metaMgrEnabled { + d.whitelistMgr.CreateEmpty(id) + if len(project.Metadata) > 0 { + if err = d.metaMgr.Add(id, project.Metadata); err != nil { + log.Errorf("failed to add metadata for project %s: %v", project.Name, err) + } } } return id, nil @@ -110,37 +121,40 @@ func (d *defaultProjectManager) Delete(projectIDOrName interface{}) error { } func (d *defaultProjectManager) Update(projectIDOrName interface{}, project *models.Project) error { - if len(project.Metadata) > 0 && d.metaMgrEnabled { - pro, err := d.Get(projectIDOrName) - if err != nil { + pro, err := d.Get(projectIDOrName) + if err != nil { + return err + } + if pro == nil { + return 
fmt.Errorf("project %v not found", projectIDOrName) + } + // TODO transaction? + if d.metaMgrEnabled { + if err := d.whitelistMgr.Set(pro.ProjectID, project.CVEWhitelist); err != nil { return err } - if pro == nil { - return fmt.Errorf("project %v not found", projectIDOrName) - } - - // TODO transaction? - metaNeedUpdated := map[string]string{} - metaNeedCreated := map[string]string{} - if pro.Metadata == nil { - pro.Metadata = map[string]string{} - } - for key, value := range project.Metadata { - _, exist := pro.Metadata[key] - if exist { - metaNeedUpdated[key] = value - } else { - metaNeedCreated[key] = value + if len(project.Metadata) > 0 { + metaNeedUpdated := map[string]string{} + metaNeedCreated := map[string]string{} + if pro.Metadata == nil { + pro.Metadata = map[string]string{} + } + for key, value := range project.Metadata { + _, exist := pro.Metadata[key] + if exist { + metaNeedUpdated[key] = value + } else { + metaNeedCreated[key] = value + } + } + if err = d.metaMgr.Add(pro.ProjectID, metaNeedCreated); err != nil { + return err + } + if err = d.metaMgr.Update(pro.ProjectID, metaNeedUpdated); err != nil { + return err } } - if err = d.metaMgr.Add(pro.ProjectID, metaNeedCreated); err != nil { - return err - } - if err = d.metaMgr.Update(pro.ProjectID, metaNeedUpdated); err != nil { - return err - } } - return d.pmsDriver.Update(projectIDOrName, project) } @@ -179,6 +193,7 @@ func (d *defaultProjectManager) List(query *models.ProjectQueryParam) (*models.P project.Metadata = meta } } + // the whitelist is not populated deliberately return result, nil } diff --git a/src/core/proxy/interceptors.go b/src/core/proxy/interceptors.go deleted file mode 100644 index b8a3fe3b8..000000000 --- a/src/core/proxy/interceptors.go +++ /dev/null @@ -1,397 +0,0 @@ -package proxy - -import ( - "encoding/json" - - "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/utils/clair" - 
"github.com/goharbor/harbor/src/common/utils/log" - "github.com/goharbor/harbor/src/common/utils/notary" - "github.com/goharbor/harbor/src/core/config" - "github.com/goharbor/harbor/src/core/promgr" - coreutils "github.com/goharbor/harbor/src/core/utils" - - "context" - "fmt" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "strings" -) - -type contextKey string - -const ( - manifestURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})` - catalogURLPattern = `/v2/_catalog` - imageInfoCtxKey = contextKey("ImageInfo") - // TODO: temp solution, remove after vmware/harbor#2242 is resolved. - tokenUsername = "harbor-core" -) - -// Record the docker deamon raw response. -var rec *httptest.ResponseRecorder - -// NotaryEndpoint , exported for testing. -var NotaryEndpoint = "" - -// MatchPullManifest checks if the request looks like a request to pull manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values -func MatchPullManifest(req *http.Request) (bool, string, string) { - // TODO: add user agent check. - if req.Method != http.MethodGet { - return false, "", "" - } - return matchManifestURL(req) -} - -// MatchPushManifest checks if the request looks like a request to push manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values -func MatchPushManifest(req *http.Request) (bool, string, string) { - if req.Method != http.MethodPut { - return false, "", "" - } - return matchManifestURL(req) -} - -func matchManifestURL(req *http.Request) (bool, string, string) { - re := regexp.MustCompile(manifestURLPattern) - s := re.FindStringSubmatch(req.URL.Path) - if len(s) == 3 { - s[1] = strings.TrimSuffix(s[1], "/") - return true, s[1], s[2] - } - return false, "", "" -} - -// MatchListRepos checks if the request looks like a request to list repositories. 
-func MatchListRepos(req *http.Request) bool { - if req.Method != http.MethodGet { - return false - } - re := regexp.MustCompile(catalogURLPattern) - s := re.FindStringSubmatch(req.URL.Path) - if len(s) == 1 { - return true - } - return false -} - -// policyChecker checks the policy of a project by project name, to determine if it's needed to check the image's status under this project. -type policyChecker interface { - // contentTrustEnabled returns whether a project has enabled content trust. - contentTrustEnabled(name string) bool - // vulnerablePolicy returns whether a project has enabled vulnerable, and the project's severity. - vulnerablePolicy(name string) (bool, models.Severity) -} - -type pmsPolicyChecker struct { - pm promgr.ProjectManager -} - -func (pc pmsPolicyChecker) contentTrustEnabled(name string) bool { - project, err := pc.pm.Get(name) - if err != nil { - log.Errorf("Unexpected error when getting the project, error: %v", err) - return true - } - return project.ContentTrustEnabled() -} -func (pc pmsPolicyChecker) vulnerablePolicy(name string) (bool, models.Severity) { - project, err := pc.pm.Get(name) - if err != nil { - log.Errorf("Unexpected error when getting the project, error: %v", err) - return true, models.SevUnknown - } - return project.VulPrevented(), clair.ParseClairSev(project.Severity()) -} - -// newPMSPolicyChecker returns an instance of an pmsPolicyChecker -func newPMSPolicyChecker(pm promgr.ProjectManager) policyChecker { - return &pmsPolicyChecker{ - pm: pm, - } -} - -func getPolicyChecker() policyChecker { - return newPMSPolicyChecker(config.GlobalProjectMgr) -} - -type imageInfo struct { - repository string - reference string - projectName string - digest string -} - -type urlHandler struct { - next http.Handler -} - -func (uh urlHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - log.Debugf("in url handler, path: %s", req.URL.Path) - flag, repository, reference := MatchPullManifest(req) - if flag { - components := 
strings.SplitN(repository, "/", 2) - if len(components) < 2 { - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Bad repository name: %s", repository)), http.StatusBadRequest) - return - } - - client, err := coreutils.NewRepositoryClientForUI(tokenUsername, repository) - if err != nil { - log.Errorf("Error creating repository Client: %v", err) - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError) - return - } - digest, _, err := client.ManifestExist(reference) - if err != nil { - log.Errorf("Failed to get digest for reference: %s, error: %v", reference, err) - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError) - return - } - - img := imageInfo{ - repository: repository, - reference: reference, - projectName: components[0], - digest: digest, - } - - log.Debugf("image info of the request: %#v", img) - ctx := context.WithValue(req.Context(), imageInfoCtxKey, img) - req = req.WithContext(ctx) - } - uh.next.ServeHTTP(rw, req) -} - -type readonlyHandler struct { - next http.Handler -} - -func (rh readonlyHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - if config.ReadOnly() { - if req.Method == http.MethodDelete || req.Method == http.MethodPost || req.Method == http.MethodPatch || req.Method == http.MethodPut { - log.Warningf("The request is prohibited in readonly mode, url is: %s", req.URL.Path) - http.Error(rw, marshalError("DENIED", "The system is in read only mode. Any modification is prohibited."), http.StatusForbidden) - return - } - } - rh.next.ServeHTTP(rw, req) -} - -type multipleManifestHandler struct { - next http.Handler -} - -// The handler is responsible for blocking request to upload manifest list by docker client, which is not supported so far by Harbor. 
-func (mh multipleManifestHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - match, _, _ := MatchPushManifest(req) - if match { - contentType := req.Header.Get("Content-type") - // application/vnd.docker.distribution.manifest.list.v2+json - if strings.Contains(contentType, "manifest.list.v2") { - log.Debugf("Content-type: %s is not supported, failing the response.", contentType) - http.Error(rw, marshalError("UNSUPPORTED_MEDIA_TYPE", "Manifest.list is not supported."), http.StatusUnsupportedMediaType) - return - } - } - mh.next.ServeHTTP(rw, req) -} - -type listReposHandler struct { - next http.Handler -} - -func (lrh listReposHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - listReposFlag := MatchListRepos(req) - if listReposFlag { - rec = httptest.NewRecorder() - lrh.next.ServeHTTP(rec, req) - if rec.Result().StatusCode != http.StatusOK { - copyResp(rec, rw) - return - } - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(rec.Body) - if err := decoder.Decode(&ctlg); err != nil { - log.Errorf("Decode repositories error: %v", err) - copyResp(rec, rw) - return - } - var entries []string - for repo := range ctlg.Repositories { - log.Debugf("the repo in the response %s", ctlg.Repositories[repo]) - exist := dao.RepositoryExists(ctlg.Repositories[repo]) - if exist { - entries = append(entries, ctlg.Repositories[repo]) - } - } - type Repos struct { - Repositories []string `json:"repositories"` - } - resp := &Repos{Repositories: entries} - respJSON, err := json.Marshal(resp) - if err != nil { - log.Errorf("Encode repositories error: %v", err) - copyResp(rec, rw) - return - } - - for k, v := range rec.Header() { - rw.Header()[k] = v - } - clen := len(respJSON) - rw.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(clen)) - rw.Write(respJSON) - return - } - lrh.next.ServeHTTP(rw, req) -} - -type contentTrustHandler struct { - next http.Handler -} - -func (cth 
contentTrustHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - imgRaw := req.Context().Value(imageInfoCtxKey) - if imgRaw == nil || !config.WithNotary() { - cth.next.ServeHTTP(rw, req) - return - } - img, _ := req.Context().Value(imageInfoCtxKey).(imageInfo) - if img.digest == "" { - cth.next.ServeHTTP(rw, req) - return - } - if !getPolicyChecker().contentTrustEnabled(img.projectName) { - cth.next.ServeHTTP(rw, req) - return - } - match, err := matchNotaryDigest(img) - if err != nil { - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "Failed in communication with Notary please check the log"), http.StatusInternalServerError) - return - } - if !match { - log.Debugf("digest mismatch, failing the response.") - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "The image is not signed in Notary."), http.StatusPreconditionFailed) - return - } - cth.next.ServeHTTP(rw, req) -} - -type vulnerableHandler struct { - next http.Handler -} - -func (vh vulnerableHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - imgRaw := req.Context().Value(imageInfoCtxKey) - if imgRaw == nil || !config.WithClair() { - vh.next.ServeHTTP(rw, req) - return - } - img, _ := req.Context().Value(imageInfoCtxKey).(imageInfo) - if img.digest == "" { - vh.next.ServeHTTP(rw, req) - return - } - projectVulnerableEnabled, projectVulnerableSeverity := getPolicyChecker().vulnerablePolicy(img.projectName) - if !projectVulnerableEnabled { - vh.next.ServeHTTP(rw, req) - return - } - overview, err := dao.GetImgScanOverview(img.digest) - if err != nil { - log.Errorf("failed to get ImgScanOverview with repo: %s, reference: %s, digest: %s. Error: %v", img.repository, img.reference, img.digest, err) - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "Failed to get ImgScanOverview."), http.StatusPreconditionFailed) - return - } - // severity is 0 means that the image fails to scan or not scanned successfully. 
- if overview == nil || overview.Sev == 0 { - log.Debugf("cannot get the image scan overview info, failing the response.") - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "Cannot get the image severity."), http.StatusPreconditionFailed) - return - } - imageSev := overview.Sev - if imageSev >= int(projectVulnerableSeverity) { - log.Debugf("the image severity: %q is higher then project setting: %q, failing the response.", models.Severity(imageSev), projectVulnerableSeverity) - http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("The severity of vulnerability of the image: %q is equal or higher than the threshold in project setting: %q.", models.Severity(imageSev), projectVulnerableSeverity)), http.StatusPreconditionFailed) - return - } - vh.next.ServeHTTP(rw, req) -} - -func matchNotaryDigest(img imageInfo) (bool, error) { - if NotaryEndpoint == "" { - NotaryEndpoint = config.InternalNotaryEndpoint() - } - targets, err := notary.GetInternalTargets(NotaryEndpoint, tokenUsername, img.repository) - if err != nil { - return false, err - } - for _, t := range targets { - if isDigest(img.reference) { - d, err := notary.DigestFromTarget(t) - if err != nil { - return false, err - } - if img.digest == d { - return true, nil - } - } else { - if t.Tag == img.reference { - log.Debugf("found reference: %s in notary, try to match digest.", img.reference) - d, err := notary.DigestFromTarget(t) - if err != nil { - return false, err - } - if img.digest == d { - return true, nil - } - } - } - } - log.Debugf("image: %#v, not found in notary", img) - return false, nil -} - -// A sha256 is a string with 64 characters. 
-func isDigest(ref string) bool { - return strings.HasPrefix(ref, "sha256:") && len(ref) == 71 -} - -func copyResp(rec *httptest.ResponseRecorder, rw http.ResponseWriter) { - for k, v := range rec.Header() { - rw.Header()[k] = v - } - rw.WriteHeader(rec.Result().StatusCode) - rw.Write(rec.Body.Bytes()) -} - -func marshalError(code, msg string) string { - var tmpErrs struct { - Errors []JSONError `json:"errors,omitempty"` - } - tmpErrs.Errors = append(tmpErrs.Errors, JSONError{ - Code: code, - Message: msg, - Detail: msg, - }) - - str, err := json.Marshal(tmpErrs) - if err != nil { - log.Debugf("failed to marshal json error, %v", err) - return msg - } - return string(str) -} - -// JSONError wraps a concrete Code and Message, it's readable for docker deamon. -type JSONError struct { - Code string `json:"code,omitempty"` - Message string `json:"message,omitempty"` - Detail string `json:"detail,omitempty"` -} diff --git a/src/core/proxy/proxy.go b/src/core/proxy/proxy.go deleted file mode 100644 index eadbfed38..000000000 --- a/src/core/proxy/proxy.go +++ /dev/null @@ -1,48 +0,0 @@ -package proxy - -import ( - "github.com/goharbor/harbor/src/core/config" - - "fmt" - "net/http" - "net/http/httputil" - "net/url" -) - -// Proxy is the instance of the reverse proxy in this package. -var Proxy *httputil.ReverseProxy - -var handlers handlerChain - -type handlerChain struct { - head http.Handler -} - -// Init initialize the Proxy instance and handler chain. 
-func Init(urls ...string) error { - var err error - var registryURL string - if len(urls) > 1 { - return fmt.Errorf("the parm, urls should have only 0 or 1 elements") - } - if len(urls) == 0 { - registryURL, err = config.RegistryURL() - if err != nil { - return err - } - } else { - registryURL = urls[0] - } - targetURL, err := url.Parse(registryURL) - if err != nil { - return err - } - Proxy = httputil.NewSingleHostReverseProxy(targetURL) - handlers = handlerChain{head: readonlyHandler{next: urlHandler{next: multipleManifestHandler{next: listReposHandler{next: contentTrustHandler{next: vulnerableHandler{next: Proxy}}}}}}} - return nil -} - -// Handle handles the request. -func Handle(rw http.ResponseWriter, req *http.Request) { - handlers.head.ServeHTTP(rw, req) -} diff --git a/src/core/router.go b/src/core/router.go old mode 100644 new mode 100755 index 1c4c31f3f..7e01b934e --- a/src/core/router.go +++ b/src/core/router.go @@ -15,17 +15,16 @@ package main import ( + "github.com/astaxie/beego" "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/core/api" "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/core/controllers" "github.com/goharbor/harbor/src/core/service/notifications/admin" - "github.com/goharbor/harbor/src/core/service/notifications/clair" "github.com/goharbor/harbor/src/core/service/notifications/jobs" "github.com/goharbor/harbor/src/core/service/notifications/registry" + "github.com/goharbor/harbor/src/core/service/notifications/scheduler" "github.com/goharbor/harbor/src/core/service/token" - - "github.com/astaxie/beego" ) func initRouters() { @@ -67,6 +66,7 @@ func initRouters() { beego.Router("/api/ping", &api.SystemInfoAPI{}, "get:Ping") beego.Router("/api/search", &api.SearchAPI{}) beego.Router("/api/projects/", &api.ProjectAPI{}, "get:List;post:Post") + beego.Router("/api/projects/:id([0-9]+)/summary", &api.ProjectAPI{}, "get:Summary") beego.Router("/api/projects/:id([0-9]+)/logs", 
&api.ProjectAPI{}, "get:Logs") beego.Router("/api/projects/:id([0-9]+)/_deletable", &api.ProjectAPI{}, "get:Deletable") beego.Router("/api/projects/:id([0-9]+)/metadatas/?:name", &api.MetadataAPI{}, "get:Get") @@ -76,6 +76,9 @@ func initRouters() { beego.Router("/api/projects/:pid([0-9]+)/robots", &api.RobotAPI{}, "post:Post;get:List") beego.Router("/api/projects/:pid([0-9]+)/robots/:id([0-9]+)", &api.RobotAPI{}, "get:Get;put:Put;delete:Delete") + beego.Router("/api/quotas", &api.QuotaAPI{}, "get:List") + beego.Router("/api/quotas/:id([0-9]+)", &api.QuotaAPI{}, "get:Get;put:Put") + beego.Router("/api/repositories", &api.RepositoryAPI{}, "get:Get") beego.Router("/api/repositories/*", &api.RepositoryAPI{}, "delete:Delete;put:Put") beego.Router("/api/repositories/*/labels", &api.RepositoryLabelAPI{}, "get:GetOfRepository;post:AddToRepository") @@ -96,6 +99,8 @@ func initRouters() { beego.Router("/api/system/gc/:id([0-9]+)/log", &api.GCAPI{}, "get:GetLog") beego.Router("/api/system/gc/schedule", &api.GCAPI{}, "get:Get;put:Put;post:Post") beego.Router("/api/system/scanAll/schedule", &api.ScanAllAPI{}, "get:Get;put:Put;post:Post") + beego.Router("/api/system/CVEWhitelist", &api.SysCVEWhitelistAPI{}, "get:Get;put:Put") + beego.Router("/api/system/oidc/ping", &api.OIDCAPI{}, "post:Ping") beego.Router("/api/logs", &api.LogAPI{}) @@ -108,6 +113,14 @@ func initRouters() { beego.Router("/api/replication/policies", &api.ReplicationPolicyAPI{}, "get:List;post:Create") beego.Router("/api/replication/policies/:id([0-9]+)", &api.ReplicationPolicyAPI{}, "get:Get;put:Update;delete:Delete") + beego.Router("/api/projects/:pid([0-9]+)/webhook/policies", &api.NotificationPolicyAPI{}, "get:List;post:Post") + beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/:id([0-9]+)", &api.NotificationPolicyAPI{}) + beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/test", &api.NotificationPolicyAPI{}, "post:Test") + + beego.Router("/api/projects/:pid([0-9]+)/webhook/lasttrigger", 
&api.NotificationPolicyAPI{}, "get:ListGroupByEventType") + + beego.Router("/api/projects/:pid([0-9]+)/webhook/jobs/", &api.NotificationJobAPI{}, "get:List") + beego.Router("/api/internal/configurations", &api.ConfigAPI{}, "get:GetInternalConfig;put:Put") beego.Router("/api/configurations", &api.ConfigAPI{}, "get:Get;put:Put") beego.Router("/api/statistics", &api.StatisticAPI{}) @@ -121,14 +134,18 @@ func initRouters() { beego.Router("/api/internal/syncregistry", &api.InternalAPI{}, "post:SyncRegistry") beego.Router("/api/internal/renameadmin", &api.InternalAPI{}, "post:RenameAdmin") + beego.Router("/api/internal/switchquota", &api.InternalAPI{}, "put:SwitchQuota") + beego.Router("/api/internal/syncquota", &api.InternalAPI{}, "post:SyncQuota") // external service that hosted on harbor process: beego.Router("/service/notifications", ®istry.NotificationHandler{}) - beego.Router("/service/notifications/clair", &clair.Handler{}, "post:Handle") beego.Router("/service/notifications/jobs/scan/:id([0-9]+)", &jobs.Handler{}, "post:HandleScan") beego.Router("/service/notifications/jobs/adminjob/:id([0-9]+)", &admin.Handler{}, "post:HandleAdminJob") beego.Router("/service/notifications/jobs/replication/:id([0-9]+)", &jobs.Handler{}, "post:HandleReplicationScheduleJob") beego.Router("/service/notifications/jobs/replication/task/:id([0-9]+)", &jobs.Handler{}, "post:HandleReplicationTask") + beego.Router("/service/notifications/jobs/webhook/:id([0-9]+)", &jobs.Handler{}, "post:HandleNotificationJob") + beego.Router("/service/notifications/jobs/retention/task/:id([0-9]+)", &jobs.Handler{}, "post:HandleRetentionTask") + beego.Router("/service/notifications/schedules/:id([0-9]+)", &scheduler.Handler{}, "post:Handle") beego.Router("/service/token", &token.Handler{}) beego.Router("/api/registries", &api.RegistryAPI{}, "get:List;post:Post") @@ -138,6 +155,16 @@ func initRouters() { beego.Router("/api/registries/:id/info", &api.RegistryAPI{}, "get:GetInfo") 
beego.Router("/api/registries/:id/namespace", &api.RegistryAPI{}, "get:GetNamespace") + beego.Router("/api/retentions/metadatas", &api.RetentionAPI{}, "get:GetMetadatas") + beego.Router("/api/retentions/:id", &api.RetentionAPI{}, "get:GetRetention") + beego.Router("/api/retentions", &api.RetentionAPI{}, "post:CreateRetention") + beego.Router("/api/retentions/:id", &api.RetentionAPI{}, "put:UpdateRetention") + beego.Router("/api/retentions/:id/executions", &api.RetentionAPI{}, "post:TriggerRetentionExec") + beego.Router("/api/retentions/:id/executions/:eid", &api.RetentionAPI{}, "patch:OperateRetentionExec") + beego.Router("/api/retentions/:id/executions", &api.RetentionAPI{}, "get:ListRetentionExecs") + beego.Router("/api/retentions/:id/executions/:eid/tasks", &api.RetentionAPI{}, "get:ListRetentionExecTasks") + beego.Router("/api/retentions/:id/executions/:eid/tasks/:tid", &api.RetentionAPI{}, "get:GetRetentionExecTaskLog") + beego.Router("/v2/*", &controllers.RegistryProxy{}, "*:Handle") // APIs for chart repository diff --git a/src/core/service/notifications/clair/handler.go b/src/core/service/notifications/clair/handler.go deleted file mode 100644 index 0c96e6768..000000000 --- a/src/core/service/notifications/clair/handler.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2018 Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clair - -import ( - "encoding/json" - "time" - - "github.com/goharbor/harbor/src/common/dao" - "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/common/utils" - "github.com/goharbor/harbor/src/common/utils/clair" - "github.com/goharbor/harbor/src/common/utils/log" - "github.com/goharbor/harbor/src/core/api" - "github.com/goharbor/harbor/src/core/config" -) - -const ( - rescanInterval = 15 * time.Minute -) - -var ( - clairClient *clair.Client -) - -// Handler handles reqeust on /service/notifications/clair/, which listens to clair's notifications. -// When there's unexpected error it will silently fail without removing the notification such that it will be triggered again. -type Handler struct { - api.BaseController -} - -// Handle ... -func (h *Handler) Handle() { - if clairClient == nil { - clairClient = clair.NewClient(config.ClairEndpoint(), nil) - } - var ne models.ClairNotificationEnvelope - if err := json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &ne); err != nil { - log.Errorf("Failed to decode the request: %v", err) - return - } - log.Debugf("Received notification from Clair, name: %s", ne.Notification.Name) - notification, err := clairClient.GetNotification(ne.Notification.Name) - if err != nil { - log.Errorf("Failed to get notification details from Clair, name: %s, err: %v", ne.Notification.Name, err) - return - } - ns := make(map[string]bool) - if old := notification.Old; old != nil { - if vuln := old.Vulnerability; vuln != nil { - log.Debugf("old vulnerability namespace: %s", vuln.NamespaceName) - ns[vuln.NamespaceName] = true - } - } - if newNotification := notification.New; newNotification != nil { - if vuln := newNotification.Vulnerability; vuln != nil { - log.Debugf("new vulnerability namespace: %s", vuln.NamespaceName) - ns[vuln.NamespaceName] = true - } - } - for k, v := range ns { - if v { - if err := dao.SetClairVulnTimestamp(k, time.Now()); err == nil { - log.Debugf("Updated the timestamp for namespaces: 
%s", k) - } else { - log.Warningf("Failed to update the timestamp for namespaces: %s, error: %v", k, err) - } - } - } - if utils.ScanOverviewMarker().Check() { - go func() { - <-time.After(rescanInterval) - l, err := dao.ListImgScanOverviews() - if err != nil { - log.Errorf("Failed to list scan overview records, error: %v", err) - return - } - for _, e := range l { - if err := clair.UpdateScanOverview(e.Digest, e.DetailsKey, config.ClairEndpoint()); err != nil { - log.Errorf("Failed to refresh scan overview for image: %s", e.Digest) - } else { - log.Debugf("Refreshed scan overview for record with digest: %s", e.Digest) - } - } - }() - utils.ScanOverviewMarker().Mark() - } else { - log.Debugf("There is a rescan scheduled at %v already, skip.", utils.ScanOverviewMarker().Next()) - } - if err := clairClient.DeleteNotification(ne.Notification.Name); err != nil { - log.Warningf("Failed to remove notification from Clair, name: %s", ne.Notification.Name) - } else { - log.Debugf("Removed notification from Clair, name: %s", ne.Notification.Name) - } -} diff --git a/src/core/service/notifications/jobs/handler.go b/src/core/service/notifications/jobs/handler.go old mode 100644 new mode 100755 index 2ddf06b27..47377f9cc --- a/src/core/service/notifications/jobs/handler.go +++ b/src/core/service/notifications/jobs/handler.go @@ -16,6 +16,7 @@ package jobs import ( "encoding/json" + "time" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/job" @@ -23,6 +24,9 @@ import ( "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/api" + jjob "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/notification" + "github.com/goharbor/harbor/src/pkg/retention" "github.com/goharbor/harbor/src/replication" "github.com/goharbor/harbor/src/replication/operation/hook" "github.com/goharbor/harbor/src/replication/policy/scheduler" @@ -30,12 +34,11 @@ 
import ( var statusMap = map[string]string{ job.JobServiceStatusPending: models.JobPending, + job.JobServiceStatusScheduled: models.JobScheduled, job.JobServiceStatusRunning: models.JobRunning, job.JobServiceStatusStopped: models.JobStopped, - job.JobServiceStatusCancelled: models.JobCanceled, job.JobServiceStatusError: models.JobError, job.JobServiceStatusSuccess: models.JobFinished, - job.JobServiceStatusScheduled: models.JobScheduled, } // Handler handles reqeust on /service/notifications/jobs/*, which listens to the webhook of jobservice. @@ -44,6 +47,7 @@ type Handler struct { id int64 status string rawStatus string + checkIn string } // Prepare ... @@ -71,6 +75,7 @@ func (h *Handler) Prepare() { return } h.status = status + h.checkIn = data.CheckIn } // HandleScan handles the webhook of scan job @@ -97,7 +102,71 @@ func (h *Handler) HandleReplicationScheduleJob() { func (h *Handler) HandleReplicationTask() { log.Debugf("received replication task status update event: task-%d, status-%s", h.id, h.status) if err := hook.UpdateTask(replication.OperationCtl, h.id, h.rawStatus); err != nil { - log.Errorf("Failed to update replication task status, id: %d, status: %s", h.id, h.status) + log.Errorf("failed to update the status of the replication task %d: %v", h.id, err) + h.SendInternalServerError(err) + return + } +} + +// HandleRetentionTask handles the webhook of retention task +func (h *Handler) HandleRetentionTask() { + taskID := h.id + status := h.rawStatus + log.Debugf("received retention task status update event: task-%d, status-%s", taskID, status) + mgr := &retention.DefaultManager{} + // handle checkin + if h.checkIn != "" { + var retainObj struct { + Total int `json:"total"` + Retained int `json:"retained"` + } + if err := json.Unmarshal([]byte(h.checkIn), &retainObj); err != nil { + log.Errorf("failed to resolve checkin of retention task %d: %v", taskID, err) + return + } + task := &retention.Task{ + ID: taskID, + Total: retainObj.Total, + Retained: 
retainObj.Retained, + } + if err := mgr.UpdateTask(task, "Total", "Retained"); err != nil { + log.Errorf("failed to update of retention task %d: %v", taskID, err) + h.SendInternalServerError(err) + return + } + return + } + + // handle status updating + if err := mgr.UpdateTaskStatus(taskID, status); err != nil { + log.Errorf("failed to update the status of retention task %d: %v", taskID, err) + h.SendInternalServerError(err) + return + } + // if the status is the final status, update the end time + if status == jjob.StoppedStatus.String() || status == jjob.SuccessStatus.String() || + status == jjob.ErrorStatus.String() { + task := &retention.Task{ + ID: taskID, + EndTime: time.Now(), + } + if err := mgr.UpdateTask(task, "EndTime"); err != nil { + log.Errorf("failed to update of retention task %d: %v", taskID, err) + h.SendInternalServerError(err) + return + } + } +} + +// HandleNotificationJob handles the hook of notification job +func (h *Handler) HandleNotificationJob() { + log.Debugf("received notification job status update event: job-%d, status-%s", h.id, h.status) + if err := notification.JobMgr.Update(&models.NotificationJob{ + ID: h.id, + Status: h.status, + UpdateTime: time.Now(), + }, "Status", "UpdateTime"); err != nil { + log.Errorf("Failed to update notification job status, id: %d, status: %s", h.id, h.status) h.SendInternalServerError(err) return } diff --git a/src/core/service/notifications/registry/handler.go b/src/core/service/notifications/registry/handler.go old mode 100644 new mode 100755 index d3530f979..eb581ef1e --- a/src/core/service/notifications/registry/handler.go +++ b/src/core/service/notifications/registry/handler.go @@ -27,6 +27,7 @@ import ( "github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/core/api" "github.com/goharbor/harbor/src/core/config" + notifierEvt "github.com/goharbor/harbor/src/core/notifier/event" coreutils "github.com/goharbor/harbor/src/core/utils" 
"github.com/goharbor/harbor/src/replication" "github.com/goharbor/harbor/src/replication/adapter" @@ -111,11 +112,30 @@ func (n *NotificationHandler) Post() { }() } - if !coreutils.WaitForManifestReady(repository, tag, 5) { + if !coreutils.WaitForManifestReady(repository, tag, 6) { log.Errorf("Manifest for image %s:%s is not ready, skip the follow up actions.", repository, tag) return } + // build and publish image push event + evt := ¬ifierEvt.Event{} + imgPushMetadata := ¬ifierEvt.ImagePushMetaData{ + Project: pro, + Tag: tag, + Digest: event.Target.Digest, + RepoName: event.Target.Repository, + OccurAt: time.Now(), + Operator: event.Actor.Name, + } + if err := evt.Build(imgPushMetadata); err != nil { + // do not return when building event metadata failed + log.Errorf("failed to build image push event metadata: %v", err) + } + if err := evt.Publish(); err != nil { + // do not return when publishing event failed + log.Errorf("failed to publish image push event: %v", err) + } + // TODO: handle image delete event and chart event go func() { e := &rep_event.Event{ @@ -148,12 +168,70 @@ func (n *NotificationHandler) Post() { } } if action == "pull" { + // build and publish image pull event + evt := ¬ifierEvt.Event{} + imgPullMetadata := ¬ifierEvt.ImagePullMetaData{ + Project: pro, + Tag: tag, + Digest: event.Target.Digest, + RepoName: event.Target.Repository, + OccurAt: time.Now(), + Operator: event.Actor.Name, + } + if err := evt.Build(imgPullMetadata); err != nil { + // do not return when building event metadata failed + log.Errorf("failed to build image push event metadata: %v", err) + } + if err := evt.Publish(); err != nil { + // do not return when publishing event failed + log.Errorf("failed to publish image pull event: %v", err) + } + go func() { log.Debugf("Increase the repository %s pull count.", repository) if err := dao.IncreasePullCount(repository); err != nil { log.Errorf("Error happens when increasing pull count: %v", repository) } }() + + // update the 
artifact pull time, and ignore the events without tag. + if tag != "" { + go func() { + artifactQuery := &models.ArtifactQuery{ + PID: pro.ProjectID, + Repo: repository, + } + + // handle pull by tag or digest + pullByDigest := utils.IsDigest(tag) + if pullByDigest { + artifactQuery.Digest = tag + } else { + artifactQuery.Tag = tag + } + + afs, err := dao.ListArtifacts(artifactQuery) + if err != nil { + log.Errorf("Error occurred when to get artifact %v", err) + return + } + if len(afs) > 0 { + log.Warningf("get multiple artifact records when to update pull time with query :%d-%s-%s, "+ + "all of them will be updated.", artifactQuery.PID, artifactQuery.Repo, artifactQuery.Tag) + } + + // ToDo: figure out how to do batch update in Pg as beego orm doesn't support update multiple like insert does. + for _, af := range afs { + log.Debugf("Update the artifact: %s pull time.", af.Repo) + af.PullTime = time.Now() + if err := dao.UpdateArtifactPullTime(af); err != nil { + log.Errorf("Error happens when updating the pull time of artifact: %d-%s, with err: %v", + artifactQuery.PID, artifactQuery.Repo, err) + } + } + }() + } + } } } diff --git a/src/core/service/notifications/scheduler/handler.go b/src/core/service/notifications/scheduler/handler.go new file mode 100644 index 000000000..b07cfd5b6 --- /dev/null +++ b/src/core/service/notifications/scheduler/handler.go @@ -0,0 +1,79 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scheduler + +import ( + "encoding/json" + "fmt" + + "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/api" + "github.com/goharbor/harbor/src/pkg/scheduler" + "github.com/goharbor/harbor/src/pkg/scheduler/hook" +) + +// Handler handles the scheduler requests +type Handler struct { + api.BaseController +} + +// Handle ... +func (h *Handler) Handle() { + log.Debugf("received scheduler hook event for schedule %s", h.GetStringFromPath(":id")) + + var data models.JobStatusChange + if err := json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &data); err != nil { + log.Errorf("failed to decode hook event: %v", err) + return + } + // status update + if len(data.CheckIn) == 0 { + schedulerID, err := h.GetInt64FromPath(":id") + if err != nil { + log.Errorf("failed to get the schedule ID: %v", err) + return + } + if err := hook.GlobalController.UpdateStatus(schedulerID, data.Status); err != nil { + h.SendInternalServerError(fmt.Errorf("failed to update status of job %s: %v", data.JobID, err)) + return + } + log.Debugf("handle status update hook event for schedule %s completed", h.GetStringFromPath(":id")) + return + } + + // run callback function + // just log the error message when handling check in request if got any error + params := map[string]interface{}{} + if err := json.Unmarshal([]byte(data.CheckIn), ¶ms); err != nil { + log.Errorf("failed to unmarshal parameters from check in message: %v", err) + return + } + callbackFuncNameParam, exist := params[scheduler.JobParamCallbackFunc] + if !exist { + log.Error("cannot get the parameter \"callback_func_name\" from the check in message") + return + } + callbackFuncName, ok := callbackFuncNameParam.(string) + if !ok || len(callbackFuncName) == 0 { + log.Errorf("invalid \"callback_func_name\": %v", callbackFuncName) + return + } + if err := hook.GlobalController.Run(callbackFuncName, params[scheduler.JobParamCallbackFuncParams]); 
err != nil { + log.Errorf("failed to run the callback function %s: %v", callbackFuncName, err) + return + } + log.Debugf("callback function %s called for schedule %s", callbackFuncName, h.GetStringFromPath(":id")) +} diff --git a/src/core/utils/retag.go b/src/core/utils/retag.go index b53f5b713..449a758b7 100644 --- a/src/core/utils/retag.go +++ b/src/core/utils/retag.go @@ -28,13 +28,13 @@ import ( // Retag tags an image to another func Retag(srcImage, destImage *models.Image) error { isSameRepo := getRepoName(srcImage) == getRepoName(destImage) - srcClient, err := NewRepositoryClientForUI("harbor-ui", getRepoName(srcImage)) + srcClient, err := NewRepositoryClientForLocal("harbor-ui", getRepoName(srcImage)) if err != nil { return err } destClient := srcClient if !isSameRepo { - destClient, err = NewRepositoryClientForUI("harbor-ui", getRepoName(destImage)) + destClient, err = NewRepositoryClientForLocal("harbor-ui", getRepoName(destImage)) if err != nil { return err } diff --git a/src/core/utils/utils.go b/src/core/utils/utils.go index 5959c4514..e55f8a010 100644 --- a/src/core/utils/utils.go +++ b/src/core/utils/utils.go @@ -17,6 +17,7 @@ package utils import ( "net/http" + "os" "time" "github.com/goharbor/harbor/src/common/utils/log" @@ -33,7 +34,20 @@ func NewRepositoryClientForUI(username, repository string) (*registry.Repository if err != nil { return nil, err } + return newRepositoryClient(endpoint, username, repository) +} +// NewRepositoryClientForLocal creates a repository client that can only be used to +// access the internal registry with 127.0.0.1 +func NewRepositoryClientForLocal(username, repository string) (*registry.Repository, error) { + // The 127.0.0.1:8080 is not reachable as we do not enable core in UT env. 
+ if os.Getenv("UTTEST") == "true" { + return NewRepositoryClientForUI(username, repository) + } + return newRepositoryClient(config.LocalCoreURL(), username, repository) +} + +func newRepositoryClient(endpoint, username, repository string) (*registry.Repository, error) { uam := &auth.UserAgentModifier{ UserAgent: "harbor-registry-client", } @@ -48,14 +62,19 @@ func NewRepositoryClientForUI(username, repository string) (*registry.Repository // WaitForManifestReady implements exponential sleeep to wait until manifest is ready in registry. // This is a workaround for https://github.com/docker/distribution/issues/2625 func WaitForManifestReady(repository string, tag string, maxRetry int) bool { - // The initial wait interval, hard-coded to 50ms - interval := 50 * time.Millisecond + // The initial wait interval, hard-coded to 80ms, interval will be 80ms,200ms,500ms,1.25s,3.124999936s + interval := 80 * time.Millisecond repoClient, err := NewRepositoryClientForUI("harbor-core", repository) if err != nil { log.Errorf("Failed to create repo client.") return false } for i := 0; i < maxRetry; i++ { + if i != 0 { + log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval) + time.Sleep(interval) + interval = time.Duration(int64(float32(interval) * 2.5)) + } _, exist, err := repoClient.ManifestExist(tag) if err != nil { log.Errorf("Unexpected error when checking manifest existence, image: %s:%s, error: %v", repository, tag, err) @@ -64,9 +83,6 @@ func WaitForManifestReady(repository string, tag string, maxRetry int) bool { if exist { return true } - log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval) - time.Sleep(interval) - interval = interval * 2 } return false } diff --git a/src/core/views/404.tpl b/src/core/views/404.tpl index 88213a5d5..e6d0d6f2e 100644 --- a/src/core/views/404.tpl +++ b/src/core/views/404.tpl @@ -67,7 +67,7 @@ a.underline, .underline{ Page Not Found diff --git 
a/src/go.mod b/src/go.mod new file mode 100644 index 000000000..fdc8554c8 --- /dev/null +++ b/src/go.mod @@ -0,0 +1,85 @@ +module github.com/goharbor/harbor/src + +go 1.12 + +replace github.com/goharbor/harbor => ../ + +require ( + github.com/Knetic/govaluate v3.0.0+incompatible // indirect + github.com/Masterminds/semver v1.4.2 + github.com/Microsoft/go-winio v0.4.12 // indirect + github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect + github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect + github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect + github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97 + github.com/astaxie/beego v1.9.0 + github.com/aws/aws-sdk-go v1.19.47 + github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0 + github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect + github.com/bitly/go-simplejson v0.5.0 // indirect + github.com/bmatcuk/doublestar v1.1.1 + github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect + github.com/bugsnag/bugsnag-go v1.5.2 // indirect + github.com/bugsnag/panicwrap v1.2.0 // indirect + github.com/casbin/casbin v1.7.0 + github.com/cenkalti/backoff v2.1.1+incompatible // indirect + github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e // indirect + github.com/coreos/go-oidc v2.0.0+incompatible + github.com/dghubble/sling v1.1.0 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/docker/distribution v2.7.1+incompatible + github.com/docker/docker v1.13.1 // indirect + github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 + github.com/garyburd/redigo v1.6.0 + github.com/ghodss/yaml v1.0.0 + github.com/go-sql-driver/mysql v1.4.1 + 
github.com/gobwas/glob v0.2.3 // indirect + github.com/gocraft/work v0.5.1 + github.com/gofrs/uuid v3.2.0+incompatible // indirect + github.com/golang-migrate/migrate v3.3.0+incompatible + github.com/gomodule/redigo v2.0.0+incompatible + github.com/google/certificate-transparency-go v1.0.21 // indirect + github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect + github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect + github.com/gorilla/handlers v1.3.0 + github.com/gorilla/mux v1.6.2 + github.com/graph-gophers/dataloader v5.0.0+incompatible + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/jinzhu/gorm v1.9.8 // indirect + github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/lib/pq v1.1.0 + github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/miekg/pkcs11 v0.0.0-20170220202408-7283ca79f35e // indirect + github.com/olekukonko/tablewriter v0.0.1 + github.com/opencontainers/go-digest v1.0.0-rc0 + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opentracing/opentracing-go v1.1.0 // indirect + github.com/pkg/errors v0.8.1 + github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect + github.com/prometheus/client_golang v0.9.4 // indirect + github.com/robfig/cron v1.0.0 + github.com/sirupsen/logrus v1.4.1 // indirect + github.com/spf13/viper v1.4.0 // indirect + github.com/stretchr/testify v1.3.0 + github.com/theupdateframework/notary v0.6.1 + golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c + golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 + gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect + gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect + gopkg.in/fatih/pool.v2 v2.0.0 // indirect + gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect + 
gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ldap.v2 v2.5.0 + gopkg.in/square/go-jose.v2 v2.3.0 // indirect + gopkg.in/yaml.v2 v2.2.2 + k8s.io/api v0.0.0-20190222213804-5cb15d344471 + k8s.io/apimachinery v0.0.0-20180704011316-f534d624797b + k8s.io/client-go v8.0.0+incompatible + k8s.io/helm v2.9.1+incompatible +) diff --git a/src/go.sum b/src/go.sum new file mode 100644 index 000000000..5ac4284ff --- /dev/null +++ b/src/go.sum @@ -0,0 +1,396 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg= +github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= +github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod 
h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d h1:RjxaKUAINjr+fYbaYjpdBUZc8R3+wF/Yr2XkDHho4Sg= +github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= +github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI= +github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97 h1:bNE5ID4C3YOkROfvBjXJUG53gyb+8az3TQN02LqnGBk= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/astaxie/beego v1.9.0 h1:tPzS+D1oCLi+SEb/TLNRNYpCjaMVfAGoy9OTLwS5ul4= +github.com/astaxie/beego v1.9.0/go.mod h1:0R4++1tUqERR0WYFWdfkcrsyoVBCG4DgpDGokT3yb+U= +github.com/aws/aws-sdk-go v1.19.47 h1:ZEze0mpk8Fttrsz6UNLqhH/jRGYbMPfWFA2ILas4AmM= +github.com/aws/aws-sdk-go v1.19.47/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0 h1:fQaDnUQvBXHHQdGBu9hz8nPznB4BeiPQokvmQVjmNEw= +github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0/go.mod 
h1:KLeFCpAMq2+50NkXC8iiJxLLiiTfTqrGtKEVm+2fk7s= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bmatcuk/doublestar v1.1.1 h1:YroD6BJCZBYx06yYFEWvUuKVWQn3vLLQAVmDmvTSaiQ= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bugsnag/bugsnag-go v1.5.2 h1:fdaGJJEReigPzSE6HajOhpJwE2IEP/TdHDHXKGeOJtc= +github.com/bugsnag/bugsnag-go v1.5.2/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0 h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/casbin/casbin v1.7.0 h1:PuzlE8w0JBg/DhIqnkF1Dewf3z+qmUZMVN07PonvVUQ= +github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE= +github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell 
v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e h1:ZtyhUG4s94BMUCdgvRZySr/AXYL5CDcjxhIV/83xJog= +github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5LbakNExNmZIg= +github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20190423183735-731ef375ac02 h1:PS3xfVPa8N84AzoWZHFCbA0+ikz4f4skktfjQoNMsgk= +github.com/denisenkom/go-mssqldb v0.0.0-20190423183735-731ef375ac02/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/dghubble/sling v1.1.0 h1:DLu20Bq2qsB9cI5Hldaxj+TMPEaPpPE8IR2kvD22Atg= +github.com/dghubble/sling v1.1.0/go.mod h1:ZcPRuLm0qrcULW2gOrjXrAWgf76sahqSyxXyVOvkunE= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= 
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= +github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c h1:Ggg7IiOtghyZzn3ozi31kPHpV6qSjMgmesXaWCijYNM= +github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 h1:X0fj836zx99zFu83v/M79DuBn84IL/Syx1SY6Y5ZEMA= +github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/fsnotify/fsnotify 
v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc= +github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gocraft/work v0.5.1 h1:3bRjMiOo6N4zcRgZWV3Y7uX7R22SF+A9bPTk4xRXr34= +github.com/gocraft/work v0.5.1/go.mod h1:pc3n9Pb5FAESPPGfM0nL+7Q1xtgtRnF8rr/azzhQVlM= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod 
h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-migrate/migrate v3.3.0+incompatible h1:RuACw4Vio/z4aebypBmpU9xKKmSiZBiHOx/Ro1QLcYc= +github.com/golang-migrate/migrate v3.3.0+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-querystring 
v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.3.0 h1:tsg9qP3mjt1h4Roxp+M1paRjrVBfPSOpBuVclh6YluI= +github.com/gorilla/handlers v1.3.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug= +github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jinzhu/gorm v1.9.8 h1:n5uvxqLepIP2R1XF7pudpt9Rv8I3m7G9trGxJVjLZ5k= +github.com/jinzhu/gorm v1.9.8/go.mod h1:bdqTT3q6dhSph2K3pWxrHP6nqxuAp2yQ3KFtc3U3F84= +github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYXJi4pg1ZKM7nxc5AfXfojeLLW7O5J3k= +github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.0 h1:6WV8LvwPpDhKjo5U9O6b4+xdG/jTXNPwlDme/MTo8Ns= +github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da h1:5y58+OCjoHCYB8182mpf/dEsq0vwTKPOo4zGfH0xW9A= +github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da/go.mod h1:oLH0CmIaxCGXD67VKGR5AacGXZSMznlmeqM8RzPrcY8= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.1.0 h1:/5u4a+KGJptBRqGzPvYQL9p0d/tPR4S31+Tnzj9lEO4= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
+github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/pkcs11 v0.0.0-20170220202408-7283ca79f35e h1:Gp+x7hv/aFRJUV6O0nu77E8N0T5PPfJGXjzQ9qgxVvE= +github.com/miekg/pkcs11 v0.0.0-20170220202408-7283ca79f35e/go.mod h1:WCBAbTOdfhHhz7YXujeZMF7owC4tPb1naKFsgfUISjo= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.1 
h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc0 h1:YHPGfp+qlmg7loi376Jk5jNEgjgUUIdXGFsel8aFHnA= +github.com/opencontainers/go-digest v1.0.0-rc0/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod 
h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= +github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= 
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/robfig/cron v1.0.0 h1:slmQxIUH6U9ruw4XoJ7C2pyyx4yYeiHx8S9pNootHsM= +github.com/robfig/cron v1.0.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/theupdateframework/notary v0.6.1 h1:7wshjstgS9x9F5LuB1L5mBI2xNMObWqjz+cjWoom6l0= +github.com/theupdateframework/notary v0.6.1/go.mod h1:MOfgIfmox8s7/7fduvB2xyPPMJCrjRLRizA8OFwpnKY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 h1:nn6Zav2sOQHCFJHEspya8KqxhFwKci30UxHy3HXPTyQ= +gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/dancannon/gorethink.v3 v3.0.5 h1:/g7PWP7zUS6vSNmHSDbjCHQh1Rqn8Jy6zSMQxAsBSMQ= +gopkg.in/dancannon/gorethink.v3 v3.0.5/go.mod h1:GXsi1e3N2OcKhcP6nsYABTiUejbWMFO4GY5a4pEaeEc= +gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= +gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= +gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= 
+gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ldap.v2 v2.5.0 h1:1rO3ojzsHUk+gq4ZYhC4Pg+EzWaaKIV8+DJwExS5/QQ= +gopkg.in/ldap.v2 v2.5.0/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.3.0 h1:nLzhkFyl5bkblqYBoiWJUt5JkWOzmiaBtCxdJAqJd3U= +gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= +k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20180704011316-f534d624797b h1:IEJ1jhyB5TOkHdq5dBEdef+MV3YAK9UYckpKYXI4Vsw= +k8s.io/apimachinery v0.0.0-20180704011316-f534d624797b/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v8.0.0+incompatible h1:tTI4hRmb1DRMl4fG6Vclfdi6nTM82oIrTT7HfitmxC4= +k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/helm v2.9.1+incompatible h1:IafoSdCxLzN1yqabsnwwAMSyjuplWVO/jy+MTyHMLIE= +k8s.io/helm v2.9.1+incompatible/go.mod 
h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= diff --git a/src/jobservice/common/rds/keys.go b/src/jobservice/common/rds/keys.go index db6ae90e0..4f9f09ba4 100644 --- a/src/jobservice/common/rds/keys.go +++ b/src/jobservice/common/rds/keys.go @@ -34,7 +34,7 @@ func RedisKeyScheduled(namespace string) string { // RedisKeyLastPeriodicEnqueue returns key of timestamp if last periodic enqueue. func RedisKeyLastPeriodicEnqueue(namespace string) string { - return RedisNamespacePrefix(namespace) + "last_periodic_enqueue" + return RedisNamespacePrefix(namespace) + "last_periodic_enqueue_h" } // KeyNamespacePrefix returns the based key based on the namespace. diff --git a/src/jobservice/config.yml b/src/jobservice/config.yml index 562317698..745e53f8b 100644 --- a/src/jobservice/config.yml +++ b/src/jobservice/config.yml @@ -19,8 +19,8 @@ worker_pool: redis_pool: #redis://[arbitrary_username:password@]ipaddress:port/database_index #or ipaddress:port[,weight,password,database_index] - redis_url: "localhost:6379" - namespace: "harbor_job_service" + redis_url: "redis://localhost:6379/2" + namespace: "harbor_job_service_namespace" #Loggers for the running job job_loggers: @@ -29,11 +29,11 @@ job_loggers: - name: "FILE" level: "DEBUG" settings: # Customized settings of logger - base_dir: "/Users/szou/tmp/job_logs" + base_dir: "/tmp/job_logs" sweeper: duration: 1 #days settings: # Customized settings of sweeper - work_dir: "/Users/szou/tmp/job_logs" + work_dir: "/tmp/job_logs" #Loggers for the job service loggers: diff --git a/src/jobservice/config/config.go b/src/jobservice/config/config.go index eec9c10f1..56614737b 100644 --- a/src/jobservice/config/config.go +++ b/src/jobservice/config/config.go @@ -24,7 +24,7 @@ import ( "strings" "github.com/goharbor/harbor/src/jobservice/common/utils" - "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v2" ) const ( @@ -37,6 +37,7 @@ const ( jobServiceRedisURL = "JOB_SERVICE_POOL_REDIS_URL" jobServiceRedisNamespace = 
"JOB_SERVICE_POOL_REDIS_NAMESPACE" jobServiceAuthSecret = "JOBSERVICE_SECRET" + coreURL = "CORE_URL" // JobServiceProtocolHTTPS points to the 'https' protocol JobServiceProtocolHTTPS = "https" @@ -163,6 +164,11 @@ func GetAuthSecret() string { return utils.ReadEnv(jobServiceAuthSecret) } +// GetCoreURL get the core url from the env +func GetCoreURL() string { + return utils.ReadEnv(coreURL) +} + // GetUIAuthSecret get the auth secret of UI side func GetUIAuthSecret() string { return utils.ReadEnv(uiAuthSecret) diff --git a/src/jobservice/config/config_test.go b/src/jobservice/config/config_test.go index 7385156fc..d6676b2e6 100644 --- a/src/jobservice/config/config_test.go +++ b/src/jobservice/config/config_test.go @@ -14,11 +14,12 @@ package config import ( + "os" + "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "os" - "testing" ) // ConfigurationTestSuite tests the configuration loading @@ -84,6 +85,7 @@ func (suite *ConfigurationTestSuite) TestConfigLoadingWithEnv() { ) assert.Equal(suite.T(), "js_secret", GetAuthSecret(), "expect auth secret 'js_secret' but got '%s'", GetAuthSecret()) assert.Equal(suite.T(), "core_secret", GetUIAuthSecret(), "expect auth secret 'core_secret' but got '%s'", GetUIAuthSecret()) + assert.Equal(suite.T(), "core_url", GetCoreURL(), "expect core url 'core_url' but got '%s'", GetCoreURL()) } // TestDefaultConfig ... 
@@ -134,6 +136,7 @@ func setENV() error { err = os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace") err = os.Setenv("JOBSERVICE_SECRET", "js_secret") err = os.Setenv("CORE_SECRET", "core_secret") + err = os.Setenv("CORE_URL", "core_url") return err } diff --git a/src/jobservice/hook/hook_client.go b/src/jobservice/hook/hook_client.go index 075614322..820880f7b 100644 --- a/src/jobservice/hook/hook_client.go +++ b/src/jobservice/hook/hook_client.go @@ -21,18 +21,10 @@ import ( "io/ioutil" "net" "net/http" - "net/url" - "os" "strings" "time" "context" - "github.com/goharbor/harbor/src/jobservice/common/utils" -) - -const ( - proxyEnvHTTP = "http_proxy" - proxyEnvHTTPS = "https_proxy" ) // Client for handling the hook events @@ -60,19 +52,7 @@ func NewClient(ctx context.Context) Client { TLSHandshakeTimeout: 10 * time.Second, ResponseHeaderTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, - } - - // Get the http/https proxies - proxyAddr, ok := os.LookupEnv(proxyEnvHTTP) - if !ok { - proxyAddr, ok = os.LookupEnv(proxyEnvHTTPS) - } - - if ok && !utils.IsEmptyStr(proxyAddr) { - proxyURL, err := url.Parse(proxyAddr) - if err == nil { - transport.Proxy = http.ProxyURL(proxyURL) - } + Proxy: http.ProxyFromEnvironment, } client := &http.Client{ diff --git a/src/jobservice/job/impl/notification/webhook_job.go b/src/jobservice/job/impl/notification/webhook_job.go new file mode 100644 index 000000000..b8c56966b --- /dev/null +++ b/src/jobservice/job/impl/notification/webhook_job.go @@ -0,0 +1,99 @@ +package notification + +import ( + "bytes" + "fmt" + commonhttp "github.com/goharbor/harbor/src/common/http" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "net/http" + "os" + "strconv" +) + +// Max retry has the same meaning as max fails. +const maxFails = "JOBSERVICE_WEBHOOK_JOB_MAX_RETRY" + +// WebhookJob implements the job interface, which send notification by http or https. 
+type WebhookJob struct { + client *http.Client + logger logger.Interface + ctx job.Context +} + +// MaxFails returns that how many times this job can fail, get this value from ctx. +func (wj *WebhookJob) MaxFails() uint { + if maxFails, exist := os.LookupEnv(maxFails); exist { + result, err := strconv.ParseUint(maxFails, 10, 32) + // Unable to log error message because the logger isn't initialized when calling this function. + if err == nil { + return uint(result) + } + } + + // Default max fails count is 10, and its max retry interval is around 3h + // Large enough to ensure most situations can notify successfully + return 10 +} + +// ShouldRetry ... +func (wj *WebhookJob) ShouldRetry() bool { + return true +} + +// Validate implements the interface in job/Interface +func (wj *WebhookJob) Validate(params job.Parameters) error { + return nil +} + +// Run implements the interface in job/Interface +func (wj *WebhookJob) Run(ctx job.Context, params job.Parameters) error { + if err := wj.init(ctx, params); err != nil { + return err + } + + return wj.execute(ctx, params) +} + +// init webhook job +func (wj *WebhookJob) init(ctx job.Context, params map[string]interface{}) error { + wj.logger = ctx.GetLogger() + wj.ctx = ctx + + // default insecureSkipVerify is false + insecureSkipVerify := false + if v, ok := params["skip_cert_verify"]; ok { + insecureSkipVerify = v.(bool) + } + wj.client = &http.Client{ + Transport: commonhttp.GetHTTPTransport(insecureSkipVerify), + } + + return nil +} + +// execute webhook job +func (wj *WebhookJob) execute(ctx job.Context, params map[string]interface{}) error { + payload := params["payload"].(string) + address := params["address"].(string) + + req, err := http.NewRequest(http.MethodPost, address, bytes.NewReader([]byte(payload))) + if err != nil { + return err + } + if v, ok := params["auth_header"]; ok && len(v.(string)) > 0 { + req.Header.Set("Authorization", v.(string)) + } + req.Header.Set("Content-Type", "application/json") + + 
resp, err := wj.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("webhook job(target: %s) response code is %d", address, resp.StatusCode) + } + + return nil +} diff --git a/src/jobservice/job/impl/notification/webhook_job_test.go b/src/jobservice/job/impl/notification/webhook_job_test.go new file mode 100644 index 000000000..d5a1db69a --- /dev/null +++ b/src/jobservice/job/impl/notification/webhook_job_test.go @@ -0,0 +1,75 @@ +package notification + +import ( + "github.com/goharbor/harbor/src/jobservice/job/impl" + "github.com/stretchr/testify/assert" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" +) + +func TestMaxFails(t *testing.T) { + rep := &WebhookJob{} + // test default max fails + assert.Equal(t, uint(10), rep.MaxFails()) + + // test user defined max fails + _ = os.Setenv(maxFails, "15") + assert.Equal(t, uint(15), rep.MaxFails()) + + // test user defined wrong max fails + _ = os.Setenv(maxFails, "abc") + assert.Equal(t, uint(10), rep.MaxFails()) +} + +func TestShouldRetry(t *testing.T) { + rep := &WebhookJob{} + assert.True(t, rep.ShouldRetry()) +} + +func TestValidate(t *testing.T) { + rep := &WebhookJob{} + assert.Nil(t, rep.Validate(nil)) +} + +func TestRun(t *testing.T) { + rep := &WebhookJob{} + + // test webhook request + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := ioutil.ReadAll(r.Body) + + // test request method + assert.Equal(t, http.MethodPost, r.Method) + // test request header + assert.Equal(t, "auth_test", r.Header.Get("Authorization")) + // test request body + assert.Equal(t, string(body), `{"key": "value"}`) + })) + defer ts.Close() + params := map[string]interface{}{ + "skip_cert_verify": true, + "payload": `{"key": "value"}`, + "address": ts.URL, + "auth_header": "auth_test", + } + // test correct webhook response + assert.Nil(t, rep.Run(&impl.Context{}, 
params)) + + tsWrong := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer tsWrong.Close() + paramsWrong := map[string]interface{}{ + "skip_cert_verify": true, + "payload": `{"key": "value"}`, + "address": tsWrong.URL, + "auth_header": "auth_test", + } + // test incorrect webhook response + assert.NotNil(t, rep.Run(&impl.Context{}, paramsWrong)) +} diff --git a/src/jobservice/job/impl/replication/replication.go b/src/jobservice/job/impl/replication/replication.go index 0543c90ab..849c5c0a2 100644 --- a/src/jobservice/job/impl/replication/replication.go +++ b/src/jobservice/job/impl/replication/replication.go @@ -34,6 +34,16 @@ import ( _ "github.com/goharbor/harbor/src/replication/adapter/native" // register the Huawei adapter _ "github.com/goharbor/harbor/src/replication/adapter/huawei" + // register the Google Gcr adapter + _ "github.com/goharbor/harbor/src/replication/adapter/googlegcr" + // register the AwsEcr adapter + _ "github.com/goharbor/harbor/src/replication/adapter/awsecr" + // register the AzureAcr adapter + _ "github.com/goharbor/harbor/src/replication/adapter/azurecr" + // register the AliACR adapter + _ "github.com/goharbor/harbor/src/replication/adapter/aliacr" + // register the Helm Hub adapter + _ "github.com/goharbor/harbor/src/replication/adapter/helmhub" ) // Replication implements the job interface diff --git a/src/jobservice/job/known_jobs.go b/src/jobservice/job/known_jobs.go index 5fd50cde0..307141e2d 100644 --- a/src/jobservice/job/known_jobs.go +++ b/src/jobservice/job/known_jobs.go @@ -30,4 +30,8 @@ const ( Replication = "REPLICATION" // ReplicationScheduler : the name of the replication scheduler job in job service ReplicationScheduler = "IMAGE_REPLICATE" + // WebhookJob : the name of the webhook job in job service + WebhookJob = "WEBHOOK" + // Retention : the name of the retention job + Retention = "RETENTION" ) diff --git 
a/src/jobservice/main.go b/src/jobservice/main.go index a5a1706f3..ca146b102 100644 --- a/src/jobservice/main.go +++ b/src/jobservice/main.go @@ -19,7 +19,6 @@ import ( "errors" "flag" "fmt" - "os" "github.com/goharbor/harbor/src/common" comcfg "github.com/goharbor/harbor/src/common/config" @@ -64,7 +63,7 @@ func main() { if utils.IsEmptyStr(secret) { return nil, errors.New("empty auth secret") } - coreURL := os.Getenv("CORE_URL") + coreURL := config.GetCoreURL() configURL := coreURL + common.CoreConfigPath cfgMgr := comcfg.NewRESTCfgManager(configURL, secret) jobCtx := impl.NewContext(ctx, cfgMgr) diff --git a/src/jobservice/migration/manager.go b/src/jobservice/migration/manager.go new file mode 100644 index 000000000..2c540d579 --- /dev/null +++ b/src/jobservice/migration/manager.go @@ -0,0 +1,148 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package migration + +import ( + "github.com/Masterminds/semver" + "reflect" + + "github.com/gomodule/redigo/redis" + + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/pkg/errors" +) + +// Manager for managing the related migrators +type Manager interface { + // Register the specified migrator to the execution chain + Register(migratorFactory MigratorFactory) + + // Migrate data + Migrate() error +} + +// MigratorChainNode is a wrapper to append the migrator to the chain with a next reference +type MigratorChainNode struct { + // Migrator implementation + migrator RDBMigrator + // Refer the next migration of the chain if existing + next *MigratorChainNode +} + +// BasicManager is the default implementation of manager interface +type BasicManager struct { + // The head of migrator chain + head *MigratorChainNode + // Pool for connecting to redis + pool *redis.Pool + // RDB namespace + namespace string +} + +// New a basic manager +func New(pool *redis.Pool, ns string) Manager { + return &BasicManager{ + pool: pool, + namespace: ns, + } +} + +// Register the migrator to the chain +func (bm *BasicManager) Register(migratorFactory MigratorFactory) { + if migratorFactory == nil { + return // ignore, do nothing + } + + migrator, err := migratorFactory(bm.pool, bm.namespace) + if err != nil { + logger.Errorf("migrator register error: %s", err) + return + } + + newNode := &MigratorChainNode{ + migrator: migrator, + next: nil, + } + + if bm.head == nil { + bm.head = newNode + return + } + + bm.head.next = newNode +} + +// Migrate data +func (bm *BasicManager) Migrate() error { + conn := bm.pool.Get() + defer func() { + _ = conn.Close() + }() + + // Read schema version first + v, err := redis.String(conn.Do("GET", VersionKey(bm.namespace))) + if err != nil && err != redis.ErrNil { + return errors.Wrap(err, "read schema version failed") + } + + if len(v) > 0 { + current, err := semver.NewVersion(v) + if err != nil { + return errors.Wrap(err, "malformed 
schema version") + } + nowV, _ := semver.NewVersion(SchemaVersion) + + diff := nowV.Compare(current) + if diff < 0 { + return errors.Errorf("the schema version of migrator is smaller that the one in the rdb: %s<%s", nowV.String(), current.String()) + } else if diff == 0 { + logger.Info("No migration needed") + return nil + } + } + + if bm.head == nil { + logger.Warning("No migrator registered, passed migration") + return nil + } + + logger.Info("Process for migrating data is started") + + h := bm.head + for h != nil { + meta := h.migrator.Metadata() + if meta == nil { + // Make metadata required + return errors.Errorf("no metadata provided for the migrator %s", reflect.TypeOf(h.migrator).String()) + } + + logger.Infof("Migrate %s from %s to %s", meta.ObjectRef, meta.FromVersion, meta.ToVersion) + if err := h.migrator.Migrate(); err != nil { + return errors.Wrap(err, "migration chain calling failed") + } + + // Next one if existing + h = h.next + } + + // Set schema version + if _, err = conn.Do("SET", VersionKey(bm.namespace), SchemaVersion); err != nil { + return errors.Wrap(err, "write schema version failed") + } + + logger.Infof("Data schema version upgraded to %s", SchemaVersion) + + return nil +} diff --git a/src/jobservice/migration/manager_test.go b/src/jobservice/migration/manager_test.go new file mode 100644 index 000000000..c63b0a076 --- /dev/null +++ b/src/jobservice/migration/manager_test.go @@ -0,0 +1,200 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/goharbor/harbor/src/jobservice/common/rds" + "github.com/goharbor/harbor/src/jobservice/common/utils" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/tests" + "github.com/gomodule/redigo/redis" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// ManagerTestSuite tests functions of manager +type ManagerTestSuite struct { + suite.Suite + + pool *redis.Pool + namespace string + + manager Manager + + jobID string + numbericID int64 +} + +// TestManagerTestSuite is entry of executing ManagerTestSuite +func TestManagerTestSuite(t *testing.T) { + suite.Run(t, new(ManagerTestSuite)) +} + +// SetupAllSuite sets up env for test suite +func (suite *ManagerTestSuite) SetupSuite() { + suite.pool = tests.GiveMeRedisPool() + suite.namespace = tests.GiveMeTestNamespace() + + suite.manager = New(suite.pool, suite.namespace) +} + +// SetupTestSuite sets up env for each test case +func (suite *ManagerTestSuite) SetupTest() { + // Mock fake data + conn := suite.pool.Get() + defer func() { + _ = conn.Close() + }() + + id := utils.MakeIdentifier() + suite.jobID = id + // Mock stats of periodic job + args := []interface{}{ + rds.KeyJobStats(suite.namespace, id), + "status_hook", + "http://core:8080/hook", + "id", + id, + "name", + job.ImageGC, + "kind", + job.KindPeriodic, + "unique", + 0, + "status", + job.SuccessStatus.String(), // v1.6 issue + "ref_link", + fmt.Sprintf("/api/v1/jobs/%s", id), + "enqueue_time", + time.Now().Unix(), + "update_time", + time.Now().Unix(), + "run_at", + time.Now().Add(5 * time.Minute).Unix(), + "cron_spec", + "0 0 17 * * *", + "multiple_executions", // V1.7 + 1, + } + reply, err := redis.String(conn.Do("HMSET", args...)) + 
require.NoError(suite.T(), err, "mock job stats data error") + require.Equal(suite.T(), "ok", strings.ToLower(reply), "ok expected") + + // Mock periodic job policy object + params := make(map[string]interface{}) + params["redis_url_reg"] = "redis://redis:6379/1" + + policy := make(map[string]interface{}) + policy["job_name"] = job.ImageGC + policy["job_params"] = params + policy["cron_spec"] = "0 0 17 * * *" + + rawJSON, err := json.Marshal(&policy) + require.NoError(suite.T(), err, "mock periodic job policy error") + + policy["cron_spec"] = "0 0 8 * * *" + duplicatedRawJSON, err := json.Marshal(&policy) + require.NoError(suite.T(), err, "mock duplicated periodic job policy error") + + score := time.Now().Unix() + suite.numbericID = score + zaddArgs := []interface{}{ + rds.KeyPeriodicPolicy(suite.namespace), + score, + rawJSON, + score - 10, + duplicatedRawJSON, // duplicated one + } + count, err := redis.Int(conn.Do("ZADD", zaddArgs...)) + require.NoError(suite.T(), err, "add raw policy error") + require.Equal(suite.T(), 2, count) + + // Mock key score mapping + keyScoreArgs := []interface{}{ + fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(suite.namespace), "period:key_score"), + score, + id, + } + + count, err = redis.Int(conn.Do("ZADD", keyScoreArgs...)) + require.NoError(suite.T(), err, "add key score mapping error") + require.Equal(suite.T(), 1, count) +} + +// SetupTestSuite clears up env for each test case +func (suite *ManagerTestSuite) TearDownTest() { + conn := suite.pool.Get() + defer func() { + _ = conn.Close() + }() + + err := tests.ClearAll(suite.namespace, conn) + assert.NoError(suite.T(), err, "clear all of redis db error") +} + +// TestManager test the basic functions of the manager +func (suite *ManagerTestSuite) TestManager() { + require.NotNil(suite.T(), suite.manager, "nil migration manager") + + suite.manager.Register(PolicyMigratorFactory) + err := suite.manager.Migrate() + require.NoError(suite.T(), err, "migrating rdb error") + + // Check 
data + conn := suite.pool.Get() + defer func() { + _ = conn.Close() + }() + + count, err := redis.Int(conn.Do("ZCARD", rds.KeyPeriodicPolicy(suite.namespace))) + assert.NoError(suite.T(), err, "get count of policies error") + assert.Equal(suite.T(), 1, count) + + innerConn := suite.pool.Get() + p, err := getPeriodicPolicy(suite.numbericID, innerConn, suite.namespace) + assert.NoError(suite.T(), err, "get migrated policy error") + assert.NotEmpty(suite.T(), p.ID, "ID of policy") + assert.NotEmpty(suite.T(), p.WebHookURL, "Web hook URL of policy") + + key := fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(suite.namespace), "period:key_score") + count, err = redis.Int(conn.Do("EXISTS", key)) + assert.NoError(suite.T(), err, "check existence of key score mapping error") + assert.Equal(suite.T(), 0, count) + + hmGetArgs := []interface{}{ + rds.KeyJobStats(suite.namespace, suite.jobID), + "id", + "status", + "web_hook_url", + "numeric_policy_id", + "multiple_executions", + "status_hook", + } + fields, err := redis.Values(conn.Do("HMGET", hmGetArgs...)) + assert.NoError(suite.T(), err, "check migrated job stats error") + assert.Equal(suite.T(), suite.jobID, toString(fields[0]), "check job ID") + assert.Equal(suite.T(), job.ScheduledStatus.String(), toString(fields[1]), "check job status") + assert.Equal(suite.T(), "http://core:8080/hook", toString(fields[2]), "check web hook URL") + assert.Equal(suite.T(), suite.numbericID, toInt(fields[3]), "check numberic ID") + assert.Nil(suite.T(), fields[4], "'multiple_executions' removed") + assert.Nil(suite.T(), fields[5], "'status_hook' removed") +} diff --git a/src/jobservice/migration/migrator.go b/src/jobservice/migration/migrator.go new file mode 100644 index 000000000..e7535c692 --- /dev/null +++ b/src/jobservice/migration/migrator.go @@ -0,0 +1,38 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "github.com/gomodule/redigo/redis" +) + +// RDBMigrator defines the action to migrate redis data +type RDBMigrator interface { + // Metadata info of the migrator + Metadata() *MigratorMeta + + // Migrate executes the real migration work + Migrate() error +} + +// MigratorMeta keeps the base info of the migrator +type MigratorMeta struct { + FromVersion string + ToVersion string + ObjectRef string +} + +// MigratorFactory is factory function to create RDBMigrator interface +type MigratorFactory func(pool *redis.Pool, namespace string) (RDBMigrator, error) diff --git a/src/jobservice/migration/migrator_v180.go b/src/jobservice/migration/migrator_v180.go new file mode 100644 index 000000000..2a9a1c6b6 --- /dev/null +++ b/src/jobservice/migration/migrator_v180.go @@ -0,0 +1,381 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package migration + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/goharbor/harbor/src/jobservice/common/rds" + "github.com/goharbor/harbor/src/jobservice/common/utils" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/jobservice/period" + "github.com/gomodule/redigo/redis" + "github.com/pkg/errors" +) + +// PolicyMigrator migrate the cron job policy to new schema +type PolicyMigrator struct { + // namespace of rdb + namespace string + + // Pool for connecting to redis + pool *redis.Pool +} + +// PolicyMigratorFactory is a factory func to create PolicyMigrator +func PolicyMigratorFactory(pool *redis.Pool, namespace string) (RDBMigrator, error) { + if pool == nil { + return nil, errors.New("PolicyMigratorFactory: missing pool") + } + + if utils.IsEmptyStr(namespace) { + return nil, errors.New("PolicyMigratorFactory: missing namespace") + } + + return &PolicyMigrator{ + namespace: namespace, + pool: pool, + }, nil +} + +// Metadata returns the base information of this migrator +func (pm *PolicyMigrator) Metadata() *MigratorMeta { + return &MigratorMeta{ + FromVersion: "<1.8.0", + ToVersion: "1.8.1", + ObjectRef: "{namespace}:period:policies", + } +} + +// Migrate data +func (pm *PolicyMigrator) Migrate() error { + conn := pm.pool.Get() + defer func() { + if err := conn.Close(); err != nil { + logger.Errorf("close redis connection error: %s", err) + } + }() + + allJobIDs, err := getAllJobStatsIDs(conn, pm.namespace) + if err != nil { + return errors.Wrap(err, "get job stats list error") + } + + args := []interface{}{ + "id_placeholder", + "id", + "kind", + "status", + "status_hook", // valid for 1.6 and 1.7, + "multiple_executions", // valid for 1.7 + "numeric_policy_id", // valid for 1.8 + } + + count := 0 + for _, fullID := range allJobIDs { + args[0] = fullID + values, err := redis.Values(conn.Do("HMGET", args...)) + if err != nil { + 
logger.Errorf("Get stats fields of job %s failed with error: %s", fullID, err) + continue + } + + pID := toString(values[0]) + kind := toString(values[1]) + + if !utils.IsEmptyStr(pID) && job.KindPeriodic == kind { + logger.Debugf("Periodic job found: %s", pID) + + // Data requires migration + // Missing 'numeric_policy_id' which is introduced in 1.8 + if values[5] == nil { + logger.Infof("Migrate periodic job stats data is started: %s", pID) + + numbericPolicyID, err := getScoreByID(pID, conn, pm.namespace) + if err != nil { + logger.Errorf("Get numberic ID of periodic job policy failed with error: %s", err) + continue + } + + // Transaction + err = conn.Send("MULTI") + setArgs := []interface{}{ + fullID, + "status", + job.ScheduledStatus.String(), // make sure the status of periodic job is "Scheduled" + "numeric_policy_id", + numbericPolicyID, + } + // If status hook existing + hookURL := toString(values[3]) + if !utils.IsEmptyStr(hookURL) { + setArgs = append(setArgs, "web_hook_url", hookURL) + } + // Set fields + err = conn.Send("HMSET", setArgs...) + + // Remove useless fields + rmArgs := []interface{}{ + fullID, + "status_hook", + "multiple_executions", + } + err = conn.Send("HDEL", rmArgs...) 
+ + // Update periodic policy model + // conn is working, we need new conn + // this inner connection will be closed by the calling method + innerConn := pm.pool.Get() + + policy, er := getPeriodicPolicy(numbericPolicyID, innerConn, pm.namespace) + if er == nil { + policy.ID = pID + if !utils.IsEmptyStr(hookURL) { + // Copy web hook URL + policy.WebHookURL = fmt.Sprintf("%s", hookURL) + } + + if rawJSON, er := policy.Serialize(); er == nil { + // Remove the old one first + err = conn.Send("ZREMRANGEBYSCORE", rds.KeyPeriodicPolicy(pm.namespace), numbericPolicyID, numbericPolicyID) + // Save back to the rdb + err = conn.Send("ZADD", rds.KeyPeriodicPolicy(pm.namespace), numbericPolicyID, rawJSON) + } else { + logger.Errorf("Serialize policy %s failed with error: %s", pID, er) + } + } else { + logger.Errorf("Get periodic policy %s failed with error: %s", pID, er) + } + + // Check error before executing + if err != nil { + logger.Errorf("Build redis transaction failed with error: %s", err) + continue + } + + // Exec + if _, err := conn.Do("EXEC"); err != nil { + logger.Errorf("Migrate periodic job %s failed with error: %s", pID, err) + continue + } + + count++ + logger.Infof("Migrate periodic job stats data is completed: %s", pID) + } + } + } + + logger.Infof("Migrate %d periodic policies", count) + + delScoreZset(conn, pm.namespace) + + return clearDuplicatedPolicies(conn, pm.namespace) +} + +// getAllJobStatsIDs get all the IDs of the existing jobs +func getAllJobStatsIDs(conn redis.Conn, ns string) ([]string, error) { + pattern := rds.KeyJobStats(ns, "*") + args := []interface{}{ + 0, + "MATCH", + pattern, + "COUNT", + 100, + } + + allFullIDs := make([]interface{}, 0) + + for { + // Use SCAN to iterate the IDs + values, err := redis.Values(conn.Do("SCAN", args...)) + if err != nil { + return nil, err + } + + // In case something wrong happened + if len(values) != 2 { + return nil, errors.Errorf("Invalid result returned for the SCAN command: %#v", values) + } + + if 
fullIDs, ok := values[1].([]interface{}); ok { + allFullIDs = append(allFullIDs, fullIDs...) + } + + // Check the next cursor + cur := toInt(values[0]) + if cur == -1 { + // No valid next cursor got + return nil, errors.Errorf("Failed to get the next SCAN cursor: %#v", values[0]) + } + + if cur != 0 { + args[0] = cur + } else { + // end + break + } + } + + IDs := make([]string, 0) + for _, fullIDValue := range allFullIDs { + if fullID, ok := fullIDValue.([]byte); ok { + IDs = append(IDs, string(fullID)) + } else { + logger.Debugf("Invalid job stats key: %#v", fullIDValue) + } + } + + return IDs, nil +} + +// Get the score with the provided ID +func getScoreByID(id string, conn redis.Conn, ns string) (int64, error) { + scoreKey := fmt.Sprintf("%s%s:%s", rds.KeyNamespacePrefix(ns), "period", "key_score") + return redis.Int64(conn.Do("ZSCORE", scoreKey, id)) +} + +// Get periodic policy object by the numeric ID +func getPeriodicPolicy(numericID int64, conn redis.Conn, ns string) (*period.Policy, error) { + // close this inner connection here + defer func() { + if err := conn.Close(); err != nil { + logger.Errorf("close redis connection error: %s", err) + } + }() + + bytes, err := redis.Values(conn.Do("ZRANGEBYSCORE", rds.KeyPeriodicPolicy(ns), numericID, numericID)) + if err != nil { + return nil, err + } + + p := &period.Policy{} + if len(bytes) > 0 { + if rawPolicy, ok := bytes[0].([]byte); ok { + if err = p.DeSerialize(rawPolicy); err == nil { + return p, nil + } + } + } + + if err == nil { + err = errors.Errorf("invalid data for periodic policy %d: %#v", numericID, bytes) + } + + return nil, err +} + +// Clear the duplicated policy entries for the job "IMAGE_GC" and "IMAGE_SCAN_ALL" +func clearDuplicatedPolicies(conn redis.Conn, ns string) error { + hash := make(map[string]interface{}) + + bytes, err := redis.Values(conn.Do("ZREVRANGE", rds.KeyPeriodicPolicy(ns), 0, -1, "WITHSCORES")) + if err != nil { + return err + } + + count := 0 + for i, l := 0, len(bytes); i 
< l; i = i + 2 { + rawPolicy := bytes[i].([]byte) + p := &period.Policy{} + + if err := p.DeSerialize(rawPolicy); err != nil { + logger.Errorf("DeSerialize policy: %s; error: %s\n", rawPolicy, err) + continue + } + + if p.JobName == job.ImageScanAllJob || + p.JobName == job.ImageGC || + p.JobName == job.ReplicationScheduler { + score, _ := strconv.ParseInt(string(bytes[i+1].([]byte)), 10, 64) + + key := hashKey(p) + if _, exists := hash[key]; exists { + // Already existing, remove the duplicated one + res, err := redis.Int(conn.Do("ZREMRANGEBYSCORE", rds.KeyPeriodicPolicy(ns), score, score)) + if err != nil || res == 0 { + logger.Errorf("Failed to clear duplicated periodic policy: %s-%s:%v", p.JobName, p.ID, score) + } else { + logger.Infof("Remove duplicated periodic policy: %s-%s:%v", p.JobName, p.ID, score) + count++ + } + } else { + hash[key] = score + } + } + } + + logger.Infof("Clear %d duplicated periodic policies", count) + + return nil +} + +// Remove the non-used key +func delScoreZset(conn redis.Conn, ns string) { + key := fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(ns), "period:key_score") + reply, err := redis.Int(conn.Do("EXISTS", key)) + if err == nil && reply == 1 { + reply, err = redis.Int(conn.Do("DEL", key)) + if err == nil && reply > 0 { + logger.Infof("%s removed", key) + return // success + } + } + + if err != nil { + // Just logged + logger.Errorf("Remove %s failed with error: %s", key, err) + } +} + +func toString(v interface{}) string { + if v == nil { + return "" + } + + if bytes, ok := v.([]byte); ok { + return string(bytes) + } + + return "" +} + +func toInt(v interface{}) int64 { + if v == nil { + return -1 + } + + if bytes, ok := v.([]byte); ok { + if intV, err := strconv.ParseInt(string(bytes), 10, 64); err == nil { + return intV + } + } + + return -1 +} + +func hashKey(p *period.Policy) string { + key := p.JobName + if p.JobParameters != nil && len(p.JobParameters) > 0 { + if bytes, err := json.Marshal(p.JobParameters); err == nil { + 
key = fmt.Sprintf("%s:%s", key, string(bytes)) + } + } + + return base64.StdEncoding.EncodeToString([]byte(key)) +} diff --git a/src/jobservice/migration/version.go b/src/jobservice/migration/version.go new file mode 100644 index 000000000..028b9b6a9 --- /dev/null +++ b/src/jobservice/migration/version.go @@ -0,0 +1,31 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "fmt" + + "github.com/goharbor/harbor/src/jobservice/common/rds" +) + +const ( + // SchemaVersion identifies the schema version of RDB + SchemaVersion = "1.8.1" +) + +// VersionKey returns the key of redis schema +func VersionKey(ns string) string { + return fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(ns), "_schema_version") +} diff --git a/src/jobservice/period/enqueuer_test.go b/src/jobservice/period/enqueuer_test.go index 5c349e2a8..5c3cc4aff 100644 --- a/src/jobservice/period/enqueuer_test.go +++ b/src/jobservice/period/enqueuer_test.go @@ -16,6 +16,12 @@ package period import ( "context" "fmt" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/goharbor/harbor/src/jobservice/common/rds" "github.com/goharbor/harbor/src/jobservice/common/utils" "github.com/goharbor/harbor/src/jobservice/env" @@ -26,9 +32,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "sync" - "testing" - "time" ) // EnqueuerTestSuite tests functions of 
enqueuer @@ -89,19 +92,30 @@ func (suite *EnqueuerTestSuite) TestEnqueuer() { suite.enqueuer.stopChan <- true }() - <-time.After(1 * time.Second) - key := rds.RedisKeyScheduled(suite.namespace) conn := suite.pool.Get() defer func() { _ = conn.Close() }() - count, err := redis.Int(conn.Do("ZCARD", key)) - require.Nil(suite.T(), err, "count scheduled: nil error expected but got %s", err) - assert.Condition(suite.T(), func() bool { - return count > 0 - }, "count of scheduled jobs should be greater than 0 but got %d", count) + tk := time.NewTicker(500 * time.Millisecond) + defer tk.Stop() + + for { + select { + case <-tk.C: + count, err := redis.Int(conn.Do("ZCARD", key)) + require.Nil(suite.T(), err, "count scheduled: nil error expected but got %s", err) + if assert.Condition(suite.T(), func() (success bool) { + return count > 0 + }, "at least one job should be scheduled for the periodic job policy") { + return + } + case <-time.After(15 * time.Second): + require.NoError(suite.T(), errors.New("timeout (15s): expect at 1 scheduled job but still get nothing")) + return + } + } }() err := suite.enqueuer.start() @@ -112,7 +126,7 @@ func (suite *EnqueuerTestSuite) prepare() { now := time.Now() minute := now.Minute() - coreSpec := fmt.Sprintf("30,50 %d * * * *", minute+2) + coreSpec := fmt.Sprintf("0-59 %d * * * *", minute) // Prepare one p := &Policy{ diff --git a/src/jobservice/runtime/bootstrap.go b/src/jobservice/runtime/bootstrap.go index 6e722bcd8..88dac6081 100644 --- a/src/jobservice/runtime/bootstrap.go +++ b/src/jobservice/runtime/bootstrap.go @@ -17,13 +17,14 @@ package runtime import ( "context" "fmt" - "github.com/goharbor/harbor/src/jobservice/mgt" "os" "os/signal" "sync" "syscall" "time" + "github.com/goharbor/harbor/src/pkg/scheduler" + "github.com/goharbor/harbor/src/jobservice/api" "github.com/goharbor/harbor/src/jobservice/common/utils" "github.com/goharbor/harbor/src/jobservice/config" @@ -32,13 +33,17 @@ import ( 
"github.com/goharbor/harbor/src/jobservice/hook" "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/jobservice/job/impl/gc" + "github.com/goharbor/harbor/src/jobservice/job/impl/notification" "github.com/goharbor/harbor/src/jobservice/job/impl/replication" "github.com/goharbor/harbor/src/jobservice/job/impl/sample" "github.com/goharbor/harbor/src/jobservice/job/impl/scan" "github.com/goharbor/harbor/src/jobservice/lcm" "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/jobservice/mgt" + "github.com/goharbor/harbor/src/jobservice/migration" "github.com/goharbor/harbor/src/jobservice/worker" "github.com/goharbor/harbor/src/jobservice/worker/cworker" + "github.com/goharbor/harbor/src/pkg/retention" "github.com/gomodule/redigo/redis" "github.com/pkg/errors" ) @@ -97,6 +102,14 @@ func (bs *Bootstrap) LoadAndRun(ctx context.Context, cancel context.CancelFunc) // Get redis connection pool redisPool := bs.getRedisPool(cfg.PoolConfig.RedisPoolCfg.RedisURL) + // Do data migration if necessary + rdbMigrator := migration.New(redisPool, namespace) + rdbMigrator.Register(migration.PolicyMigratorFactory) + if err := rdbMigrator.Migrate(); err != nil { + // Just logged, should not block the starting process + logger.Error(err) + } + // Create stats manager manager = mgt.NewManager(ctx, namespace, redisPool) // Create hook agent, it's a singleton object @@ -229,11 +242,14 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool( // Only for debugging and testing purpose job.SampleJob: (*sample.Job)(nil), // Functional jobs - job.ImageScanJob: (*scan.ClairJob)(nil), - job.ImageScanAllJob: (*scan.All)(nil), - job.ImageGC: (*gc.GarbageCollector)(nil), - job.Replication: (*replication.Replication)(nil), - job.ReplicationScheduler: (*replication.Scheduler)(nil), + job.ImageScanJob: (*scan.ClairJob)(nil), + job.ImageScanAllJob: (*scan.All)(nil), + job.ImageGC: (*gc.GarbageCollector)(nil), + job.Replication: 
(*replication.Replication)(nil), + job.ReplicationScheduler: (*replication.Scheduler)(nil), + job.Retention: (*retention.Job)(nil), + scheduler.JobNameScheduler: (*scheduler.PeriodicJob)(nil), + job.WebhookJob: (*notification.WebhookJob)(nil), }); err != nil { // exit return nil, err @@ -249,9 +265,8 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool( // Get a redis connection pool func (bs *Bootstrap) getRedisPool(redisURL string) *redis.Pool { return &redis.Pool{ - MaxActive: 6, - MaxIdle: 6, - Wait: true, + MaxIdle: 6, + Wait: true, Dial: func() (redis.Conn, error) { return redis.DialURL( redisURL, diff --git a/src/jobservice/runtime/bootstrap_test.go b/src/jobservice/runtime/bootstrap_test.go index 74bfa9a59..257e58479 100644 --- a/src/jobservice/runtime/bootstrap_test.go +++ b/src/jobservice/runtime/bootstrap_test.go @@ -16,6 +16,7 @@ package runtime import ( "context" + "fmt" "github.com/goharbor/harbor/src/jobservice/common/utils" "github.com/goharbor/harbor/src/jobservice/config" "github.com/goharbor/harbor/src/jobservice/logger" @@ -56,15 +57,16 @@ func (suite *BootStrapTestSuite) SetupSuite() { // TearDownSuite clears the test suite func (suite *BootStrapTestSuite) TearDownSuite() { - suite.cancel() - pool := tests.GiveMeRedisPool() conn := pool.Get() defer func() { _ = conn.Close() }() - _ = tests.ClearAll(tests.GiveMeTestNamespace(), conn) + err := tests.ClearAll(fmt.Sprintf("{%s}", tests.GiveMeTestNamespace()), conn) + require.NoError(suite.T(), err, "clear rdb error") + + suite.cancel() } // TestBootStrapTestSuite is entry of go test diff --git a/src/jobservice/worker/cworker/c_worker.go b/src/jobservice/worker/cworker/c_worker.go index e9cafe0c1..6de36856f 100644 --- a/src/jobservice/worker/cworker/c_worker.go +++ b/src/jobservice/worker/cworker/c_worker.go @@ -17,6 +17,7 @@ package cworker import ( "fmt" "reflect" + "sync" "time" "github.com/gocraft/work" @@ -30,7 +31,6 @@ import ( "github.com/goharbor/harbor/src/jobservice/worker" 
"github.com/gomodule/redigo/redis" "github.com/pkg/errors" - "sync" ) var ( @@ -66,7 +66,10 @@ type workerContext struct{} // log the job func (rpc *workerContext) logJob(job *work.Job, next work.NextMiddlewareFunc) error { - jobInfo, _ := utils.SerializeJob(job) + jobCopy := *job + // as the args may contain sensitive information, ignore them when logging the detail + jobCopy.Args = nil + jobInfo, _ := utils.SerializeJob(&jobCopy) logger.Infof("Job incoming: %s", jobInfo) return next() diff --git a/src/jobservice/worker/cworker/c_worker_test.go b/src/jobservice/worker/cworker/c_worker_test.go index e80810dc4..be7931c17 100644 --- a/src/jobservice/worker/cworker/c_worker_test.go +++ b/src/jobservice/worker/cworker/c_worker_test.go @@ -188,11 +188,23 @@ func (suite *CWorkerTestSuite) TestStopJob() { t, err := suite.lcmCtl.New(genericJob) require.NoError(suite.T(), err, "new job stats: nil error expected but got %s", err) - time.Sleep(3 * time.Second) + tk := time.NewTicker(500 * time.Millisecond) + defer tk.Stop() - latest, err := t.Status() - require.NoError(suite.T(), err, "get latest status: nil error expected but got %s", err) - assert.EqualValues(suite.T(), job.RunningStatus, latest, "expect job is running now") +LOOP: + for { + select { + case <-tk.C: + latest, err := t.Status() + require.NoError(suite.T(), err, "get latest status: nil error expected but got %s", err) + if latest.Compare(job.RunningStatus) == 0 { + break LOOP + } + case <-time.After(30 * time.Second): + require.NoError(suite.T(), errors.New("check running status time out")) + break LOOP + } + } err = suite.cWorker.StopJob(genericJob.Info.JobID) require.NoError(suite.T(), err, "stop job: nil error expected but got %s", err) @@ -255,7 +267,7 @@ func (j *fakeLongRunJob) Validate(params job.Parameters) error { } func (j *fakeLongRunJob) Run(ctx job.Context, params job.Parameters) error { - time.Sleep(5 * time.Second) + time.Sleep(3 * time.Second) if _, stopped := ctx.OPCommand(); stopped { return 
nil diff --git a/src/pkg/authproxy/http.go b/src/pkg/authproxy/http.go new file mode 100644 index 000000000..baeed17cf --- /dev/null +++ b/src/pkg/authproxy/http.go @@ -0,0 +1,65 @@ +package authproxy + +import ( + "encoding/json" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + k8s_api_v1beta1 "k8s.io/api/authentication/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" +) + +// TokenReview ... +func TokenReview(sessionID string, authProxyConfig *models.HTTPAuthProxy) (*k8s_api_v1beta1.TokenReview, error) { + + // Init auth client with the auth proxy endpoint. + authClientCfg := &rest.Config{ + Host: authProxyConfig.TokenReviewEndpoint, + ContentConfig: rest.ContentConfig{ + GroupVersion: &schema.GroupVersion{}, + NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, + }, + BearerToken: sessionID, + TLSClientConfig: rest.TLSClientConfig{ + Insecure: !authProxyConfig.VerifyCert, + }, + } + authClient, err := rest.RESTClientFor(authClientCfg) + if err != nil { + return nil, err + } + + // Do auth with the token. + tokenReviewRequest := &k8s_api_v1beta1.TokenReview{ + TypeMeta: metav1.TypeMeta{ + Kind: "TokenReview", + APIVersion: "authentication.k8s.io/v1beta1", + }, + Spec: k8s_api_v1beta1.TokenReviewSpec{ + Token: sessionID, + }, + } + res := authClient.Post().Body(tokenReviewRequest).Do() + err = res.Error() + if err != nil { + log.Errorf("fail to POST auth request, %v", err) + return nil, err + } + resRaw, err := res.Raw() + if err != nil { + log.Errorf("fail to get raw data of token review, %v", err) + return nil, err + } + // Parse the auth response, check the user name and authenticated status. 
+ tokenReviewResponse := &k8s_api_v1beta1.TokenReview{} + err = json.Unmarshal(resRaw, &tokenReviewResponse) + if err != nil { + log.Errorf("fail to decode token review, %v", err) + return nil, err + } + return tokenReviewResponse, nil + +} diff --git a/src/pkg/clients/core/chart.go b/src/pkg/clients/core/chart.go new file mode 100644 index 000000000..75d8c3983 --- /dev/null +++ b/src/pkg/clients/core/chart.go @@ -0,0 +1,40 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "fmt" + + "github.com/goharbor/harbor/src/chartserver" +) + +func (c *client) ListAllCharts(project, repository string) ([]*chartserver.ChartVersion, error) { + url := c.buildURL(fmt.Sprintf("/api/chartrepo/%s/charts/%s", project, repository)) + var charts []*chartserver.ChartVersion + if err := c.httpclient.Get(url, &charts); err != nil { + return nil, err + } + return charts, nil +} + +func (c *client) DeleteChart(project, repository, version string) error { + url := c.buildURL(fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", project, repository, version)) + return c.httpclient.Delete(url) +} + +func (c *client) DeleteChartRepository(project, repository string) error { + url := c.buildURL(fmt.Sprintf("/api/chartrepo/%s/charts/%s", project, repository)) + return c.httpclient.Delete(url) +} diff --git a/src/pkg/clients/core/client.go b/src/pkg/clients/core/client.go new file mode 100644 index 000000000..2234fd17c --- /dev/null +++ b/src/pkg/clients/core/client.go @@ -0,0 +1,65 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/common/models" + + "github.com/goharbor/harbor/src/chartserver" + chttp "github.com/goharbor/harbor/src/common/http" + "github.com/goharbor/harbor/src/common/http/modifier" +) + +// Client defines the methods that a core client should implement +// Currently, it contains only part of the whole method collection +// and we should expand it when needed +type Client interface { + ImageClient + ChartClient +} + +// ImageClient defines the methods that an image client should implement +type ImageClient interface { + ListAllImages(project, repository string) ([]*models.TagResp, error) + DeleteImage(project, repository, tag string) error + DeleteImageRepository(project, repository string) error +} + +// ChartClient defines the methods that a chart client should implement +type ChartClient interface { + ListAllCharts(project, repository string) ([]*chartserver.ChartVersion, error) + DeleteChart(project, repository, version string) error + DeleteChartRepository(project, repository string) error +} + +// New returns an instance of the client which is a default implement for Client +func New(url string, httpclient *http.Client, authorizer modifier.Modifier) Client { + return &client{ + url: url, + httpclient: chttp.NewClient(httpclient, authorizer), + } +} + +type client struct { + url string + httpclient *chttp.Client +} + +func (c *client) buildURL(path string) string { + return fmt.Sprintf("%s/%s", c.url, path) +} diff --git a/src/pkg/clients/core/image.go b/src/pkg/clients/core/image.go new file mode 100644 index 000000000..1b8811790 --- /dev/null +++ b/src/pkg/clients/core/image.go @@ -0,0 +1,40 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + + "github.com/goharbor/harbor/src/common/models" +) + +func (c *client) ListAllImages(project, repository string) ([]*models.TagResp, error) { + url := c.buildURL(fmt.Sprintf("/api/repositories/%s/%s/tags", project, repository)) + var images []*models.TagResp + if err := c.httpclient.GetAndIteratePagination(url, &images); err != nil { + return nil, err + } + return images, nil +} + +func (c *client) DeleteImage(project, repository, tag string) error { + url := c.buildURL(fmt.Sprintf("/api/repositories/%s/%s/tags/%s", project, repository, tag)) + return c.httpclient.Delete(url) +} + +func (c *client) DeleteImageRepository(project, repository string) error { + url := c.buildURL(fmt.Sprintf("/api/repositories/%s/%s", project, repository)) + return c.httpclient.Delete(url) +} diff --git a/src/pkg/notification/hook/hook.go b/src/pkg/notification/hook/hook.go new file mode 100755 index 000000000..8524a0e0e --- /dev/null +++ b/src/pkg/notification/hook/hook.go @@ -0,0 +1,85 @@ +package hook + +import ( + "encoding/json" + "fmt" + "time" + + cJob "github.com/goharbor/harbor/src/common/job" + "github.com/goharbor/harbor/src/common/job/models" + cModels "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/core/notifier/model" + "github.com/goharbor/harbor/src/core/utils" + "github.com/goharbor/harbor/src/pkg/notification/job" + "github.com/goharbor/harbor/src/pkg/notification/job/manager" +) + 
+// Manager send hook +type Manager interface { + StartHook(*model.HookEvent, *models.JobData) error +} + +// DefaultManager ... +type DefaultManager struct { + jobMgr job.Manager + client cJob.Client +} + +// NewHookManager ... +func NewHookManager() *DefaultManager { + return &DefaultManager{ + jobMgr: manager.NewDefaultManager(), + client: utils.GetJobServiceClient(), + } +} + +// StartHook create a notification job record in database, and submit it to jobservice +func (hm *DefaultManager) StartHook(event *model.HookEvent, data *models.JobData) error { + payload, err := json.Marshal(event.Payload) + if err != nil { + return err + } + + t := time.Now() + id, err := hm.jobMgr.Create(&cModels.NotificationJob{ + PolicyID: event.PolicyID, + EventType: event.EventType, + NotifyType: event.Target.Type, + Status: cModels.JobPending, + CreationTime: t, + UpdateTime: t, + JobDetail: string(payload), + }) + if err != nil { + return fmt.Errorf("failed to create the job record for notification based on policy %d: %v", event.PolicyID, err) + } + statusHookURL := fmt.Sprintf("%s/service/notifications/jobs/webhook/%d", config.InternalCoreURL(), id) + data.StatusHook = statusHookURL + + log.Debugf("created a notification job %d for the policy %d", id, event.PolicyID) + + // submit hook job to jobservice + jobUUID, err := hm.client.SubmitJob(data) + if err != nil { + log.Errorf("failed to submit job with notification event: %v", err) + e := hm.jobMgr.Update(&cModels.NotificationJob{ + ID: id, + Status: cModels.JobError, + }, "Status") + if e != nil { + log.Errorf("failed to update the notification job status %d: %v", id, e) + } + return err + } + + if err = hm.jobMgr.Update(&cModels.NotificationJob{ + ID: id, + UUID: jobUUID, + }, "UUID"); err != nil { + log.Errorf("failed to update the notification job %d: %v", id, err) + return err + } + return nil +} diff --git a/src/pkg/notification/job/manager.go b/src/pkg/notification/job/manager.go new file mode 100755 index 
000000000..da8ac8027 --- /dev/null +++ b/src/pkg/notification/job/manager.go @@ -0,0 +1,20 @@ +package job + +import ( + "github.com/goharbor/harbor/src/common/models" +) + +// Manager manages notification jobs recorded in database +type Manager interface { + // Create create a notification job + Create(job *models.NotificationJob) (int64, error) + + // List list notification jobs + List(...*models.NotificationJobQuery) (int64, []*models.NotificationJob, error) + + // Update update notification job + Update(job *models.NotificationJob, props ...string) error + + // ListJobsGroupByEventType lists last triggered jobs group by event type + ListJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) +} diff --git a/src/pkg/notification/job/manager/manager.go b/src/pkg/notification/job/manager/manager.go new file mode 100755 index 000000000..8db3aecd6 --- /dev/null +++ b/src/pkg/notification/job/manager/manager.go @@ -0,0 +1,55 @@ +package manager + +import ( + "fmt" + + "github.com/goharbor/harbor/src/common/dao/notification" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/pkg/notification/job" +) + +// DefaultManager .. +type DefaultManager struct { +} + +// NewDefaultManager ... +func NewDefaultManager() job.Manager { + return &DefaultManager{} +} + +// Create ... +func (d *DefaultManager) Create(job *models.NotificationJob) (int64, error) { + return notification.AddNotificationJob(job) +} + +// List ... +func (d *DefaultManager) List(query ...*models.NotificationJobQuery) (int64, []*models.NotificationJob, error) { + total, err := notification.GetTotalCountOfNotificationJobs(query...) + if err != nil { + return 0, nil, err + } + + executions, err := notification.GetNotificationJobs(query...) + if err != nil { + return 0, nil, err + } + return total, executions, nil +} + +// Update ... 
+func (d *DefaultManager) Update(job *models.NotificationJob, props ...string) error { + n, err := notification.UpdateNotificationJob(job, props...) + if err != nil { + return err + } + + if n == 0 { + return fmt.Errorf("execution %d not found", job.ID) + } + return nil +} + +// ListJobsGroupByEventType lists last triggered jobs group by event type +func (d *DefaultManager) ListJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) { + return notification.GetLastTriggerJobsGroupByEventType(policyID) +} diff --git a/src/pkg/notification/job/manager/manager_test.go b/src/pkg/notification/job/manager/manager_test.go new file mode 100644 index 000000000..a373f618b --- /dev/null +++ b/src/pkg/notification/job/manager/manager_test.go @@ -0,0 +1,22 @@ +package manager + +import ( + "reflect" + "testing" +) + +func TestNewDefaultManger(t *testing.T) { + tests := []struct { + name string + want *DefaultManager + }{ + {want: &DefaultManager{}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewDefaultManager(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewDefaultManager() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/pkg/notification/model/const.go b/src/pkg/notification/model/const.go new file mode 100644 index 000000000..51b8288ee --- /dev/null +++ b/src/pkg/notification/model/const.go @@ -0,0 +1,16 @@ +package model + +// const definitions +const ( + EventTypePushImage = "pushImage" + EventTypePullImage = "pullImage" + EventTypeDeleteImage = "deleteImage" + EventTypeUploadChart = "uploadChart" + EventTypeDeleteChart = "deleteChart" + EventTypeDownloadChart = "downloadChart" + EventTypeScanningCompleted = "scanningCompleted" + EventTypeScanningFailed = "scanningFailed" + EventTypeTestEndpoint = "testEndpoint" + + NotifyTypeHTTP = "http" +) diff --git a/src/pkg/notification/notification.go b/src/pkg/notification/notification.go new file mode 100755 index 000000000..4de7479d1 --- /dev/null +++ 
b/src/pkg/notification/notification.go @@ -0,0 +1,63 @@ +package notification + +import ( + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/notification/hook" + "github.com/goharbor/harbor/src/pkg/notification/job" + jobMgr "github.com/goharbor/harbor/src/pkg/notification/job/manager" + "github.com/goharbor/harbor/src/pkg/notification/model" + "github.com/goharbor/harbor/src/pkg/notification/policy" + "github.com/goharbor/harbor/src/pkg/notification/policy/manager" +) + +var ( + // PolicyMgr is a global notification policy manager + PolicyMgr policy.Manager + + // JobMgr is a notification job controller + JobMgr job.Manager + + // HookManager is a hook manager + HookManager hook.Manager + + // SupportedEventTypes is a map to store supported event type, eg. pushImage, pullImage etc + SupportedEventTypes map[string]struct{} + + // SupportedNotifyTypes is a map to store notification type, eg. HTTP, Email etc + SupportedNotifyTypes map[string]struct{} +) + +// Init ... 
+func Init() { + // init notification policy manager + PolicyMgr = manager.NewDefaultManger() + // init hook manager + HookManager = hook.NewHookManager() + // init notification job manager + JobMgr = jobMgr.NewDefaultManager() + + SupportedEventTypes = make(map[string]struct{}) + SupportedNotifyTypes = make(map[string]struct{}) + + initSupportedEventType( + model.EventTypePushImage, model.EventTypePullImage, model.EventTypeDeleteImage, + model.EventTypeUploadChart, model.EventTypeDeleteChart, model.EventTypeDownloadChart, + model.EventTypeScanningCompleted, model.EventTypeScanningFailed, + ) + + initSupportedNotifyType(model.NotifyTypeHTTP) + + log.Info("notification initialization completed") +} + +func initSupportedEventType(eventTypes ...string) { + for _, eventType := range eventTypes { + SupportedEventTypes[eventType] = struct{}{} + } +} + +func initSupportedNotifyType(notifyTypes ...string) { + for _, notifyType := range notifyTypes { + SupportedNotifyTypes[notifyType] = struct{}{} + } +} diff --git a/src/pkg/notification/policy/manager.go b/src/pkg/notification/policy/manager.go new file mode 100755 index 000000000..d08ffc3bd --- /dev/null +++ b/src/pkg/notification/policy/manager.go @@ -0,0 +1,25 @@ +package policy + +import ( + "github.com/goharbor/harbor/src/common/models" +) + +// Manager manages the notification policies +type Manager interface { + // Create new policy + Create(*models.NotificationPolicy) (int64, error) + // List the policies, returns the policy list and error + List(int64) ([]*models.NotificationPolicy, error) + // Get policy with specified ID + Get(int64) (*models.NotificationPolicy, error) + // GetByNameAndProjectID get policy by the name and projectID + GetByNameAndProjectID(string, int64) (*models.NotificationPolicy, error) + // Update the specified policy + Update(*models.NotificationPolicy) error + // Delete the specified policy + Delete(int64) error + // Test the specified policy + Test(*models.NotificationPolicy) error + // 
GetRelatedPolices get event type related policies in project + GetRelatedPolices(int64, string) ([]*models.NotificationPolicy, error) +} diff --git a/src/pkg/notification/policy/manager/manager.go b/src/pkg/notification/policy/manager/manager.go new file mode 100755 index 000000000..c4f6681c2 --- /dev/null +++ b/src/pkg/notification/policy/manager/manager.go @@ -0,0 +1,159 @@ +package manager + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/goharbor/harbor/src/common/dao/notification" + commonhttp "github.com/goharbor/harbor/src/common/http" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + notifierModel "github.com/goharbor/harbor/src/core/notifier/model" + "github.com/goharbor/harbor/src/pkg/notification/model" +) + +// DefaultManager ... +type DefaultManager struct { +} + +// NewDefaultManger ... +func NewDefaultManger() *DefaultManager { + return &DefaultManager{} +} + +// Create notification policy +func (m *DefaultManager) Create(policy *models.NotificationPolicy) (int64, error) { + t := time.Now() + policy.CreationTime = t + policy.UpdateTime = t + + err := policy.ConvertToDBModel() + if err != nil { + return 0, err + } + return notification.AddNotificationPolicy(policy) +} + +// List the notification policies, returns the policy list and error +func (m *DefaultManager) List(projectID int64) ([]*models.NotificationPolicy, error) { + policies := []*models.NotificationPolicy{} + persisPolicies, err := notification.GetNotificationPolicies(projectID) + if err != nil { + return nil, err + } + + for _, policy := range persisPolicies { + err := policy.ConvertFromDBModel() + if err != nil { + return nil, err + } + policies = append(policies, policy) + } + + return policies, nil +} + +// Get notification policy with specified ID +func (m *DefaultManager) Get(id int64) (*models.NotificationPolicy, error) { + policy, err := notification.GetNotificationPolicy(id) + if err != nil { + return 
nil, err + } + if policy == nil { + return nil, nil + } + err = policy.ConvertFromDBModel() + return policy, err +} + +// GetByNameAndProjectID notification policy by the name and projectID +func (m *DefaultManager) GetByNameAndProjectID(name string, projectID int64) (*models.NotificationPolicy, error) { + policy, err := notification.GetNotificationPolicyByName(name, projectID) + if err != nil { + return nil, err + } + err = policy.ConvertFromDBModel() + return policy, err +} + +// Update the specified notification policy +func (m *DefaultManager) Update(policy *models.NotificationPolicy) error { + policy.UpdateTime = time.Now() + err := policy.ConvertToDBModel() + if err != nil { + return err + } + return notification.UpdateNotificationPolicy(policy) +} + +// Delete the specified notification policy +func (m *DefaultManager) Delete(policyID int64) error { + return notification.DeleteNotificationPolicy(policyID) +} + +// Test the specified notification policy, just test for network connection without request body +func (m *DefaultManager) Test(policy *models.NotificationPolicy) error { + p, err := json.Marshal(notifierModel.Payload{ + Type: model.EventTypeTestEndpoint, + }) + if err != nil { + return err + } + + for _, target := range policy.Targets { + switch target.Type { + case "http": + return m.policyHTTPTest(target.Address, target.SkipCertVerify, p) + default: + return fmt.Errorf("invalid policy target type: %s", target.Type) + } + } + return nil +} + +func (m *DefaultManager) policyHTTPTest(address string, skipCertVerify bool, p []byte) error { + req, err := http.NewRequest(http.MethodPost, address, nil) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + + client := http.Client{ + Transport: commonhttp.GetHTTPTransport(skipCertVerify), + } + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + log.Debugf("policy test success with address %s, skip cert verify :%v", address, 
skipCertVerify) + + return nil +} + +// GetRelatedPolices get policies including event type in project +func (m *DefaultManager) GetRelatedPolices(projectID int64, eventType string) ([]*models.NotificationPolicy, error) { + policies, err := m.List(projectID) + if err != nil { + return nil, fmt.Errorf("failed to get notification policies with projectID %d: %v", projectID, err) + } + + var result []*models.NotificationPolicy + + for _, ply := range policies { + if !ply.Enabled { + continue + } + for _, t := range ply.EventTypes { + if t != eventType { + continue + } + result = append(result, ply) + } + } + return result, nil +} diff --git a/src/pkg/notification/policy/manager/manager_test.go b/src/pkg/notification/policy/manager/manager_test.go new file mode 100644 index 000000000..9dfd6970f --- /dev/null +++ b/src/pkg/notification/policy/manager/manager_test.go @@ -0,0 +1,22 @@ +package manager + +import ( + "reflect" + "testing" +) + +func TestNewDefaultManger(t *testing.T) { + tests := []struct { + name string + want *DefaultManager + }{ + {want: &DefaultManager{}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewDefaultManger(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewDefaultManger() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/pkg/project/manager.go b/src/pkg/project/manager.go new file mode 100644 index 000000000..f4d5a8910 --- /dev/null +++ b/src/pkg/project/manager.go @@ -0,0 +1,61 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package project + +import ( + "fmt" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" +) + +// Manager is used for project management +// currently, the interface only defines the methods needed for tag retention +// will expand it when doing refactor +type Manager interface { + // List projects according to the query + List(...*models.ProjectQueryParam) ([]*models.Project, error) + // Get the project specified by the ID or name + Get(interface{}) (*models.Project, error) +} + +// New returns a default implementation of Manager +func New() Manager { + return &manager{} +} + +type manager struct{} + +// List projects according to the query +func (m *manager) List(query ...*models.ProjectQueryParam) ([]*models.Project, error) { + var q *models.ProjectQueryParam + if len(query) > 0 { + q = query[0] + } + return dao.GetProjects(q) +} + +// Get the project specified by the ID +func (m *manager) Get(idOrName interface{}) (*models.Project, error) { + id, ok := idOrName.(int64) + if ok { + return dao.GetProjectByID(id) + } + name, ok := idOrName.(string) + if ok { + return dao.GetProjectByName(name) + } + return nil, fmt.Errorf("invalid parameter: %v, should be ID(int64) or name(string)", idOrName) +} diff --git a/src/pkg/repository/manager.go b/src/pkg/repository/manager.go new file mode 100644 index 000000000..3631baac3 --- /dev/null +++ b/src/pkg/repository/manager.go @@ -0,0 +1,61 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package repository + +import ( + "github.com/goharbor/harbor/src/chartserver" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/pkg/project" +) + +// Manager is used for repository management +// currently, the interface only defines the methods needed for tag retention +// will expand it when doing refactor +type Manager interface { + // List image repositories under the project specified by the ID + ListImageRepositories(projectID int64) ([]*models.RepoRecord, error) + // List chart repositories under the project specified by the ID + ListChartRepositories(projectID int64) ([]*chartserver.ChartInfo, error) +} + +// New returns a default implementation of Manager +func New(projectMgr project.Manager, chartCtl *chartserver.Controller) Manager { + return &manager{ + projectMgr: projectMgr, + chartCtl: chartCtl, + } +} + +type manager struct { + projectMgr project.Manager + chartCtl *chartserver.Controller +} + +// List image repositories under the project specified by the ID +func (m *manager) ListImageRepositories(projectID int64) ([]*models.RepoRecord, error) { + return dao.GetRepositories(&models.RepositoryQuery{ + ProjectIDs: []int64{projectID}, + }) +} + +// List chart repositories under the project specified by the ID +func (m *manager) ListChartRepositories(projectID int64) ([]*chartserver.ChartInfo, error) { + project, err := m.projectMgr.Get(projectID) + if err != nil { + return nil, err + } + return m.chartCtl.ListCharts(project.Name) +} diff --git 
a/src/pkg/retention/controller.go b/src/pkg/retention/controller.go new file mode 100644 index 000000000..d12ef3996 --- /dev/null +++ b/src/pkg/retention/controller.go @@ -0,0 +1,280 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package retention + +import ( + "fmt" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/project" + "github.com/goharbor/harbor/src/pkg/repository" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/q" + "github.com/goharbor/harbor/src/pkg/scheduler" +) + +// APIController to handle the requests related with retention +type APIController interface { + // Handle the related hooks from the job service and launch the corresponding actions if needed + // + // Arguments: + // PolicyID string : uuid of the retention policy + // event *job.StatusChange : event object sent by job service + // + // Returns: + // common error object if any errors occurred + HandleHook(policyID string, event *job.StatusChange) error + + GetRetention(id int64) (*policy.Metadata, error) + + CreateRetention(p *policy.Metadata) (int64, error) + + UpdateRetention(p *policy.Metadata) error + + DeleteRetention(id int64) error + + TriggerRetentionExec(policyID int64, trigger string, dryRun bool) (int64, error) + + OperateRetentionExec(eid int64, action string) error + + GetRetentionExec(eid int64) (*Execution, error) + + 
ListRetentionExecs(policyID int64, query *q.Query) ([]*Execution, error) + + GetTotalOfRetentionExecs(policyID int64) (int64, error) + + ListRetentionExecTasks(executionID int64, query *q.Query) ([]*Task, error) + + GetTotalOfRetentionExecTasks(executionID int64) (int64, error) + + GetRetentionExecTaskLog(taskID int64) ([]byte, error) +} + +// DefaultAPIController ... +type DefaultAPIController struct { + manager Manager + launcher Launcher + projectManager project.Manager + repositoryMgr repository.Manager + scheduler scheduler.Scheduler +} + +const ( + // SchedulerCallback ... + SchedulerCallback = "SchedulerCallback" +) + +// TriggerParam ... +type TriggerParam struct { + PolicyID int64 + Trigger string +} + +// GetRetention Get Retention +func (r *DefaultAPIController) GetRetention(id int64) (*policy.Metadata, error) { + return r.manager.GetPolicy(id) +} + +// CreateRetention Create Retention +func (r *DefaultAPIController) CreateRetention(p *policy.Metadata) (int64, error) { + if p.Trigger.Kind == policy.TriggerKindSchedule { + cron, ok := p.Trigger.Settings[policy.TriggerSettingsCron] + if ok && len(cron.(string)) > 0 { + jobid, err := r.scheduler.Schedule(cron.(string), SchedulerCallback, TriggerParam{ + PolicyID: p.ID, + Trigger: ExecutionTriggerSchedule, + }) + if err != nil { + return 0, err + } + if p.Trigger.References == nil { + p.Trigger.References = map[string]interface{}{} + } + p.Trigger.References[policy.TriggerReferencesJobid] = jobid + } + } + id, err := r.manager.CreatePolicy(p) + if err != nil { + return 0, err + } + return id, nil +} + +// UpdateRetention Update Retention +func (r *DefaultAPIController) UpdateRetention(p *policy.Metadata) error { + p0, err := r.manager.GetPolicy(p.ID) + if err != nil { + return err + } + needUn := false + needSch := false + + if p0.Trigger.Kind != p.Trigger.Kind { + if p0.Trigger.Kind == policy.TriggerKindSchedule { + needUn = true + } + + if p.Trigger.Kind == policy.TriggerKindSchedule { + needSch = true + } 
+ } else { + switch p.Trigger.Kind { + case policy.TriggerKindSchedule: + if p0.Trigger.Settings["cron"] != p.Trigger.Settings["cron"] { + // unschedule old + if len(p0.Trigger.Settings[policy.TriggerSettingsCron].(string)) > 0 { + needUn = true + } + // schedule new + if len(p.Trigger.Settings[policy.TriggerSettingsCron].(string)) > 0 { + // valid cron + needSch = true + } + } + case "": + + default: + return fmt.Errorf("not support Trigger %s", p.Trigger.Kind) + } + } + if needUn { + err = r.scheduler.UnSchedule(p0.Trigger.References[policy.TriggerReferencesJobid].(int64)) + if err != nil { + return err + } + } + if needSch { + jobid, err := r.scheduler.Schedule(p.Trigger.Settings[policy.TriggerSettingsCron].(string), SchedulerCallback, TriggerParam{ + PolicyID: p.ID, + Trigger: ExecutionTriggerSchedule, + }) + if err != nil { + return err + } + p.Trigger.References[policy.TriggerReferencesJobid] = jobid + } + + return r.manager.UpdatePolicy(p) +} + +// DeleteRetention Delete Retention +func (r *DefaultAPIController) DeleteRetention(id int64) error { + p, err := r.manager.GetPolicy(id) + if err != nil { + return err + } + if p.Trigger.Kind == policy.TriggerKindSchedule && len(p.Trigger.Settings[policy.TriggerSettingsCron].(string)) > 0 { + err = r.scheduler.UnSchedule(p.Trigger.References[policy.TriggerReferencesJobid].(int64)) + if err != nil { + return err + } + } + + return r.manager.DeletePolicyAndExec(id) +} + +// TriggerRetentionExec Trigger Retention Execution +func (r *DefaultAPIController) TriggerRetentionExec(policyID int64, trigger string, dryRun bool) (int64, error) { + p, err := r.manager.GetPolicy(policyID) + if err != nil { + return 0, err + } + + exec := &Execution{ + PolicyID: policyID, + StartTime: time.Now(), + Trigger: trigger, + DryRun: dryRun, + } + id, err := r.manager.CreateExecution(exec); if err != nil { return 0, err } + if _, err = r.launcher.Launch(p, id, dryRun); err != nil { + // clean execution if launch failed + _ = r.manager.DeleteExecution(id) + return 0, err + 
} + return id, err + +} + +// OperateRetentionExec Operate Retention Execution +func (r *DefaultAPIController) OperateRetentionExec(eid int64, action string) error { + e, err := r.manager.GetExecution(eid) + if err != nil { + return err + } + if e == nil { + return fmt.Errorf("execution %d not found", eid) + } + switch action { + case "stop": + return r.launcher.Stop(eid) + default: + return fmt.Errorf("not support action %s", action) + } +} + +// GetRetentionExec Get Retention Execution +func (r *DefaultAPIController) GetRetentionExec(executionID int64) (*Execution, error) { + return r.manager.GetExecution(executionID) +} + +// ListRetentionExecs List Retention Executions +func (r *DefaultAPIController) ListRetentionExecs(policyID int64, query *q.Query) ([]*Execution, error) { + return r.manager.ListExecutions(policyID, query) +} + +// GetTotalOfRetentionExecs Count Retention Executions +func (r *DefaultAPIController) GetTotalOfRetentionExecs(policyID int64) (int64, error) { + return r.manager.GetTotalOfRetentionExecs(policyID) +} + +// ListRetentionExecTasks List Retention Execution Histories +func (r *DefaultAPIController) ListRetentionExecTasks(executionID int64, query *q.Query) ([]*Task, error) { + q1 := &q.TaskQuery{ + ExecutionID: executionID, + } + if query != nil { + q1.PageSize = query.PageSize + q1.PageNumber = query.PageNumber + } + return r.manager.ListTasks(q1) +} + +// GetTotalOfRetentionExecTasks Count Retention Execution Histories +func (r *DefaultAPIController) GetTotalOfRetentionExecTasks(executionID int64) (int64, error) { + return r.manager.GetTotalOfTasks(executionID) +} + +// GetRetentionExecTaskLog Get Retention Execution Task Log +func (r *DefaultAPIController) GetRetentionExecTaskLog(taskID int64) ([]byte, error) { + return r.manager.GetTaskLog(taskID) +} + +// HandleHook HandleHook +func (r *DefaultAPIController) HandleHook(policyID string, event *job.StatusChange) error { + panic("implement me") +} + +// NewAPIController ... 
+func NewAPIController(retentionMgr Manager, projectManager project.Manager, repositoryMgr repository.Manager, scheduler scheduler.Scheduler, retentionLauncher Launcher) APIController { + return &DefaultAPIController{ + manager: retentionMgr, + launcher: retentionLauncher, + projectManager: projectManager, + repositoryMgr: repositoryMgr, + scheduler: scheduler, + } +} diff --git a/src/pkg/retention/controller_test.go b/src/pkg/retention/controller_test.go new file mode 100644 index 000000000..28202dd71 --- /dev/null +++ b/src/pkg/retention/controller_test.go @@ -0,0 +1,234 @@ +package retention + +import ( + "testing" + + "github.com/goharbor/harbor/src/pkg/retention/dep" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/stretchr/testify/suite" +) + +type ControllerTestSuite struct { + suite.Suite + + oldClient dep.Client +} + +// SetupSuite ... +func (s *ControllerTestSuite) SetupSuite() { + +} + +// TestController ... 
+func TestController(t *testing.T) { + suite.Run(t, new(ControllerTestSuite)) +} + +func (s *ControllerTestSuite) TestPolicy() { + projectMgr := &fakeProjectManager{} + repositoryMgr := &fakeRepositoryManager{} + retentionScheduler := &fakeRetentionScheduler{} + retentionLauncher := &fakeLauncher{} + retentionMgr := NewManager() + c := NewAPIController(retentionMgr, projectMgr, repositoryMgr, retentionScheduler, retentionLauncher) + + p1 := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + { + ID: 2, + Priority: 1, + Template: "recentXdays", + Disabled: true, + Parameters: rule.Parameters{ + "num": 3, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + + id, err := c.CreateRetention(p1) + s.Require().Nil(err) + s.Require().True(id > 0) + + p1, err = c.GetRetention(id) + s.Require().Nil(err) + s.Require().EqualValues("project", p1.Scope.Level) + s.Require().True(p1.ID > 0) + + p1.Scope.Level = "test" + err = c.UpdateRetention(p1) + s.Require().Nil(err) + p1, err = c.GetRetention(id) + 
s.Require().Nil(err) + s.Require().EqualValues("test", p1.Scope.Level) + + err = c.DeleteRetention(id) + s.Require().Nil(err) + + p1, err = c.GetRetention(id) + s.Require().Nil(err) + s.Require().Nil(p1) +} + +func (s *ControllerTestSuite) TestExecution() { + projectMgr := &fakeProjectManager{} + repositoryMgr := &fakeRepositoryManager{} + retentionScheduler := &fakeRetentionScheduler{} + retentionLauncher := &fakeLauncher{} + retentionMgr := NewManager() + m := NewAPIController(retentionMgr, projectMgr, repositoryMgr, retentionScheduler, retentionLauncher) + + p1 := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + + policyID, err := m.CreateRetention(p1) + s.Require().Nil(err) + s.Require().True(policyID > 0) + + id, err := m.TriggerRetentionExec(policyID, ExecutionTriggerManual, false) + s.Require().Nil(err) + s.Require().True(id > 0) + + e1, err := m.GetRetentionExec(id) + s.Require().Nil(err) + s.Require().NotNil(e1) + s.Require().EqualValues(id, e1.ID) + + err = m.OperateRetentionExec(id, "stop") + s.Require().Nil(err) + + es, err := m.ListRetentionExecs(policyID, nil) + s.Require().Nil(err) + s.Require().EqualValues(1, len(es)) + + ts, err := m.ListRetentionExecTasks(id, nil) + s.Require().Nil(err) + s.Require().EqualValues(0, len(ts)) + +} + +type fakeRetentionScheduler struct { +} + +func (f 
*fakeRetentionScheduler) Schedule(cron string, callbackFuncName string, params interface{}) (int64, error) { + return 111, nil +} + +func (f *fakeRetentionScheduler) UnSchedule(id int64) error { + return nil +} + +type fakeLauncher struct { +} + +func (f *fakeLauncher) Stop(executionID int64) error { + return nil +} + +func (f *fakeLauncher) Launch(policy *policy.Metadata, executionID int64, isDryRun bool) (int64, error) { + return 0, nil +} diff --git a/src/pkg/retention/dao/models/retention.go b/src/pkg/retention/dao/models/retention.go new file mode 100644 index 000000000..8e7d94590 --- /dev/null +++ b/src/pkg/retention/dao/models/retention.go @@ -0,0 +1,62 @@ +package models + +import ( + "time" + + "github.com/astaxie/beego/orm" +) + +// const definitions +const ( + ExecutionStatusInProgress string = "InProgress" + ExecutionStatusSucceed string = "Succeed" + ExecutionStatusFailed string = "Failed" + ExecutionStatusStopped string = "Stopped" +) + +func init() { + orm.RegisterModel( + new(RetentionPolicy), + new(RetentionExecution), + new(RetentionTask), + ) +} + +// RetentionPolicy Retention Policy +type RetentionPolicy struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + // 'system', 'project' and 'repository' + ScopeLevel string + ScopeReference int64 + TriggerKind string + // json format, include algorithm, rules, exclusions + Data string + CreateTime time.Time + UpdateTime time.Time +} + +// RetentionExecution Retention Execution +type RetentionExecution struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + PolicyID int64 `orm:"column(policy_id)"` + DryRun bool + // manual, scheduled + Trigger string + StartTime time.Time + EndTime time.Time `orm:"-"` + Status string `orm:"-"` +} + +// RetentionTask ... 
+type RetentionTask struct { + ID int64 `orm:"pk;auto;column(id)"` + ExecutionID int64 `orm:"column(execution_id)"` + Repository string `orm:"column(repository)"` + JobID string `orm:"column(job_id)"` + Status string `orm:"column(status)"` + StatusCode int `orm:"column(status_code)"` + StartTime time.Time `orm:"column(start_time)"` + EndTime time.Time `orm:"column(end_time)"` + Total int `orm:"column(total)"` + Retained int `orm:"column(retained)"` +} diff --git a/src/pkg/retention/dao/retention.go b/src/pkg/retention/dao/retention.go new file mode 100644 index 000000000..2c810923d --- /dev/null +++ b/src/pkg/retention/dao/retention.go @@ -0,0 +1,291 @@ +package dao + +import ( + "errors" + "fmt" + "strconv" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + jobmodels "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/pkg/retention/dao/models" + "github.com/goharbor/harbor/src/pkg/retention/q" +) + +// CreatePolicy Create Policy +func CreatePolicy(p *models.RetentionPolicy) (int64, error) { + o := dao.GetOrmer() + return o.Insert(p) +} + +// UpdatePolicy Update Policy +func UpdatePolicy(p *models.RetentionPolicy, cols ...string) error { + o := dao.GetOrmer() + _, err := o.Update(p, cols...) + return err +} + +// DeletePolicyAndExec Delete Policy and Exec +func DeletePolicyAndExec(id int64) error { + o := dao.GetOrmer() + if _, err := o.Raw("delete from retention_task where execution_id in (select id from retention_execution where policy_id = ?) 
", id).Exec(); err != nil { + return nil + } + if _, err := o.Raw("delete from retention_execution where policy_id = ?", id).Exec(); err != nil { + return err + } + if _, err := o.Delete(&models.RetentionExecution{ + PolicyID: id, + }); err != nil { + return err + } + _, err := o.Delete(&models.RetentionPolicy{ + ID: id, + }) + return err +} + +// GetPolicy Get Policy +func GetPolicy(id int64) (*models.RetentionPolicy, error) { + o := dao.GetOrmer() + p := &models.RetentionPolicy{ + ID: id, + } + if err := o.Read(p); err != nil { + return nil, err + } + return p, nil +} + +// CreateExecution Create Execution +func CreateExecution(e *models.RetentionExecution) (int64, error) { + o := dao.GetOrmer() + return o.Insert(e) +} + +// UpdateExecution Update Execution +func UpdateExecution(e *models.RetentionExecution, cols ...string) error { + o := dao.GetOrmer() + _, err := o.Update(e, cols...) + return err +} + +// DeleteExecution Delete Execution +func DeleteExecution(id int64) error { + o := dao.GetOrmer() + _, err := o.Delete(&models.RetentionExecution{ + ID: id, + }) + return err +} + +// GetExecution Get Execution +func GetExecution(id int64) (*models.RetentionExecution, error) { + o := dao.GetOrmer() + e := &models.RetentionExecution{ + ID: id, + } + if err := o.Read(e); err != nil { + return nil, err + } + if err := fillStatus(e); err != nil { + return nil, err + } + return e, nil +} + +// fillStatus the priority is InProgress Stopped Failed Succeed +func fillStatus(exec *models.RetentionExecution) error { + o := dao.GetOrmer() + var r orm.Params + if _, err := o.Raw("select status, count(*) num from retention_task where execution_id = ? group by status", exec.ID). 
+ RowsToMap(&r, "status", "num"); err != nil { + return err + } + var ( + total, running, succeed, failed, stopped int64 + ) + for k, s := range r { + v, err := strconv.ParseInt(s.(string), 10, 64) + if err != nil { + return err + } + total += v + switch k { + case jobmodels.JobScheduled: + running += v + case jobmodels.JobPending: + running += v + case jobmodels.JobRunning: + running += v + case jobmodels.JobRetrying: + running += v + case jobmodels.JobFinished: + succeed += v + case jobmodels.JobCanceled: + stopped += v + case jobmodels.JobStopped: + stopped += v + case jobmodels.JobError: + failed += v + } + } + if total == 0 { + exec.Status = models.ExecutionStatusSucceed + exec.EndTime = exec.StartTime + return nil + } + if running > 0 { + exec.Status = models.ExecutionStatusInProgress + } else if stopped > 0 { + exec.Status = models.ExecutionStatusStopped + } else if failed > 0 { + exec.Status = models.ExecutionStatusFailed + } else { + exec.Status = models.ExecutionStatusSucceed + } + if exec.Status != models.ExecutionStatusInProgress { + if err := o.Raw("select max(end_time) from retention_task where execution_id = ?", exec.ID). 
+ QueryRow(&exec.EndTime); err != nil { + return err + } + } + return nil +} + +// ListExecutions List Executions +func ListExecutions(policyID int64, query *q.Query) ([]*models.RetentionExecution, error) { + o := dao.GetOrmer() + qs := o.QueryTable(new(models.RetentionExecution)) + + qs = qs.Filter("policy_id", policyID) + qs = qs.OrderBy("-id") + if query != nil { + qs = qs.Limit(query.PageSize, (query.PageNumber-1)*query.PageSize) + } + var execs []*models.RetentionExecution + _, err := qs.All(&execs) + if err != nil { + return nil, err + } + for _, e := range execs { + if err := fillStatus(e); err != nil { + return nil, err + } + } + return execs, nil +} + +// GetTotalOfRetentionExecs Count Executions +func GetTotalOfRetentionExecs(policyID int64) (int64, error) { + o := dao.GetOrmer() + qs := o.QueryTable(new(models.RetentionExecution)) + + qs = qs.Filter("policy_id", policyID) + return qs.Count() +} + +/* +// ListExecHistories List Execution Histories +func ListExecHistories(executionID int64, query *q.Query) ([]*models.RetentionTask, error) { + o := dao.GetOrmer() + qs := o.QueryTable(new(models.RetentionTask)) + qs = qs.Filter("Execution_ID", executionID) + if query != nil { + qs = qs.Limit(query.PageSize, (query.PageNumber-1)*query.PageSize) + } + var tasks []*models.RetentionTask + _, err := qs.All(&tasks) + if err != nil { + return nil, err + } + return tasks, nil +} + +// AppendExecHistory Append Execution History +func AppendExecHistory(t *models.RetentionTask) (int64, error) { + o := dao.GetOrmer() + return o.Insert(t) +} +*/ + +// CreateTask creates task record in database +func CreateTask(task *models.RetentionTask) (int64, error) { + if task == nil { + return 0, errors.New("nil task") + } + return dao.GetOrmer().Insert(task) +} + +// UpdateTask updates the task record in database +func UpdateTask(task *models.RetentionTask, cols ...string) error { + if task == nil { + return errors.New("nil task") + } + if task.ID <= 0 { + return 
fmt.Errorf("invalid task ID: %d", task.ID) + } + _, err := dao.GetOrmer().Update(task, cols...) + return err +} + +// UpdateTaskStatus updates the status of task whose status code is less than the statusCode provided +func UpdateTaskStatus(taskID int64, status string, statusCode int) error { + _, err := dao.GetOrmer().QueryTable(&models.RetentionTask{}). + Filter("ID", taskID). + Filter("StatusCode__lt", statusCode). + Update(orm.Params{ + "Status": status, + "StatusCode": statusCode, + }) + return err +} + +// DeleteTask deletes the task record specified by ID in database +func DeleteTask(id int64) error { + _, err := dao.GetOrmer().Delete(&models.RetentionTask{ + ID: id, + }) + return err +} + +// GetTask get the task record specified by ID in database +func GetTask(id int64) (*models.RetentionTask, error) { + task := &models.RetentionTask{ + ID: id, + } + if err := dao.GetOrmer().Read(task); err != nil { + return nil, err + } + return task, nil +} + +// ListTask lists the tasks according to the query +func ListTask(query ...*q.TaskQuery) ([]*models.RetentionTask, error) { + qs := dao.GetOrmer().QueryTable(&models.RetentionTask{}) + if len(query) > 0 && query[0] != nil { + q := query[0] + if q.ExecutionID > 0 { + qs = qs.Filter("ExecutionID", q.ExecutionID) + } + if len(q.Status) > 0 { + qs = qs.Filter("Status", q.Status) + } + if q.PageSize > 0 { + qs = qs.Limit(q.PageSize) + if q.PageNumber > 0 { + qs = qs.Offset((q.PageNumber - 1) * q.PageSize) + } + } + } + tasks := []*models.RetentionTask{} + _, err := qs.All(&tasks) + return tasks, err +} + +// GetTotalOfTasks Count tasks +func GetTotalOfTasks(executionID int64) (int64, error) { + qs := dao.GetOrmer().QueryTable(&models.RetentionTask{}) + qs = qs.Filter("ExecutionID", executionID) + return qs.Count() +} diff --git a/src/pkg/retention/dao/retention_test.go b/src/pkg/retention/dao/retention_test.go new file mode 100644 index 000000000..597b86c40 --- /dev/null +++ b/src/pkg/retention/dao/retention_test.go @@ 
-0,0 +1,220 @@ +package dao + +import ( + "encoding/json" + "os" + "strings" + "testing" + "time" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/retention/dao/models" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/q" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { + dao.PrepareTestForPostgresSQL() + os.Exit(m.Run()) +} + +func TestPolicy(t *testing.T) { + p := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + p1 := &models.RetentionPolicy{ + ScopeLevel: p.Scope.Level, + TriggerKind: p.Trigger.Kind, + CreateTime: time.Now(), + UpdateTime: time.Now(), + } + data, _ := json.Marshal(p) + p1.Data = string(data) + + id, err := CreatePolicy(p1) + assert.Nil(t, err) + assert.True(t, id > 0) + + p1, err = GetPolicy(id) + assert.Nil(t, err) + assert.EqualValues(t, "project", p1.ScopeLevel) + assert.True(t, p1.ID > 0) + + p1.ScopeLevel = "test" + err = UpdatePolicy(p1) + assert.Nil(t, err) + p1, err = GetPolicy(id) + assert.Nil(t, err) + assert.EqualValues(t, "test", p1.ScopeLevel) + + err = DeletePolicyAndExec(id) + assert.Nil(t, err) + + p1, err = GetPolicy(id) + assert.NotNil(t, err) + 
assert.True(t, strings.Contains(err.Error(), "no row found")) +} + +func TestExecution(t *testing.T) { + p := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + p1 := &models.RetentionPolicy{ + ScopeLevel: p.Scope.Level, + TriggerKind: p.Trigger.Kind, + CreateTime: time.Now(), + UpdateTime: time.Now(), + } + data, _ := json.Marshal(p) + p1.Data = string(data) + + policyID, err := CreatePolicy(p1) + assert.Nil(t, err) + assert.True(t, policyID > 0) + + e := &models.RetentionExecution{ + PolicyID: policyID, + DryRun: false, + Trigger: "manual", + StartTime: time.Now(), + } + id, err := CreateExecution(e) + assert.Nil(t, err) + assert.True(t, id > 0) + + e1, err := GetExecution(id) + assert.Nil(t, err) + assert.NotNil(t, e1) + assert.EqualValues(t, id, e1.ID) + + es, err := ListExecutions(policyID, nil) + assert.Nil(t, err) + assert.EqualValues(t, 1, len(es)) +} + +func TestTask(t *testing.T) { + task := &models.RetentionTask{ + ExecutionID: 1, + Status: "pending", + } + // create + id, err := CreateTask(task) + require.Nil(t, err) + + // get + tk, err := GetTask(id) + require.Nil(t, err) + require.Equal(t, id, tk.ID) + require.Equal(t, "pending", tk.Status) + + // update + task.ID = id + task.Total = 1 + err = UpdateTask(task, "Total") + require.Nil(t, err) + + // update status + err = 
UpdateTaskStatus(id, "running", 1) + require.Nil(t, err) + + // list + tasks, err := ListTask(&q.TaskQuery{ + ExecutionID: 1, + Status: "running", + }) + require.Nil(t, err) + require.Equal(t, 1, len(tasks)) + assert.Equal(t, 1, tasks[0].Total) + assert.Equal(t, int64(1), tasks[0].ExecutionID) + assert.Equal(t, "running", tasks[0].Status) + assert.Equal(t, 1, tasks[0].StatusCode) + + // delete + err = DeleteTask(id) + require.Nil(t, err) +} diff --git a/src/pkg/retention/dep/client.go b/src/pkg/retention/dep/client.go new file mode 100644 index 000000000..9ccb951f7 --- /dev/null +++ b/src/pkg/retention/dep/client.go @@ -0,0 +1,181 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dep + +import ( + "errors" + "fmt" + "net/http" + + "github.com/goharbor/harbor/src/common/http/modifier/auth" + "github.com/goharbor/harbor/src/jobservice/config" + "github.com/goharbor/harbor/src/pkg/clients/core" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +// DefaultClient for the retention +var DefaultClient = NewClient() + +// Client is designed to access core service to get required infos +type Client interface { + // Get the tag candidates under the repository + // + // Arguments: + // repo *res.Repository : repository info + // + // Returns: + // []*res.Candidate : candidates returned + // error : common error if any errors occurred + GetCandidates(repo *res.Repository) ([]*res.Candidate, error) + + // Delete the given repository + // + // Arguments: + // repo *res.Repository : repository info + // + // Returns: + // error : common error if any errors occurred + DeleteRepository(repo *res.Repository) error + + // Delete the specified candidate + // + // Arguments: + // candidate *res.Candidate : the deleting candidate + // + // Returns: + // error : common error if any errors occurred + Delete(candidate *res.Candidate) error +} + +// NewClient new a basic client +func NewClient(client ...*http.Client) Client { + var c *http.Client + if len(client) > 0 { + c = client[0] + } + if c == nil { + c = http.DefaultClient + } + + // init core client + internalCoreURL := config.GetCoreURL() + jobserviceSecret := config.GetAuthSecret() + authorizer := auth.NewSecretAuthorizer(jobserviceSecret) + coreClient := core.New(internalCoreURL, c, authorizer) + + return &basicClient{ + internalCoreURL: internalCoreURL, + coreClient: coreClient, + } +} + +// basicClient is a default +type basicClient struct { + internalCoreURL string + coreClient core.Client +} + +// GetCandidates gets the tag candidates under the repository +func (bc *basicClient) GetCandidates(repository *res.Repository) ([]*res.Candidate, error) { + if repository == nil { + return 
nil, errors.New("repository is nil") + } + candidates := make([]*res.Candidate, 0) + switch repository.Kind { + case res.Image: + images, err := bc.coreClient.ListAllImages(repository.Namespace, repository.Name) + if err != nil { + return nil, err + } + for _, image := range images { + labels := make([]string, 0) + for _, label := range image.Labels { + labels = append(labels, label.Name) + } + candidate := &res.Candidate{ + Kind: res.Image, + Namespace: repository.Namespace, + Repository: repository.Name, + Tag: image.Name, + Labels: labels, + CreationTime: image.Created.Unix(), + PulledTime: image.PullTime.Unix(), + PushedTime: image.PushTime.Unix(), + } + candidates = append(candidates, candidate) + } + /* + case res.Chart: + charts, err := bc.coreClient.ListAllCharts(repository.Namespace, repository.Name) + if err != nil { + return nil, err + } + for _, chart := range charts { + labels := make([]string, 0) + for _, label := range chart.Labels { + labels = append(labels, label.Name) + } + candidate := &res.Candidate{ + Kind: res.Chart, + Namespace: repository.Namespace, + Repository: repository.Name, + Tag: chart.Name, + Labels: labels, + CreationTime: chart.Created.Unix(), + PushedTime: , + PulledTime: , + } + candidates = append(candidates, candidate) + } + */ + default: + return nil, fmt.Errorf("unsupported repository kind: %s", repository.Kind) + } + return candidates, nil +} + +// DeleteRepository deletes the specified repository +func (bc *basicClient) DeleteRepository(repo *res.Repository) error { + if repo == nil { + return errors.New("repository is nil") + } + switch repo.Kind { + case res.Image: + return bc.coreClient.DeleteImageRepository(repo.Namespace, repo.Name) + /* + case res.Chart: + return bc.coreClient.DeleteChartRepository(repo.Namespace, repo.Name) + */ + default: + return fmt.Errorf("unsupported repository kind: %s", repo.Kind) + } +} + +// Deletes the specified candidate +func (bc *basicClient) Delete(candidate *res.Candidate) error { + if 
candidate == nil { + return errors.New("candidate is nil") + } + switch candidate.Kind { + case res.Image: + return bc.coreClient.DeleteImage(candidate.Namespace, candidate.Repository, candidate.Tag) + /* + case res.Chart: + return bc.coreClient.DeleteChart(candidate.Namespace, candidate.Repository, candidate.Tag) + */ + default: + return fmt.Errorf("unsupported candidate kind: %s", candidate.Kind) + } +} diff --git a/src/pkg/retention/dep/client_test.go b/src/pkg/retention/dep/client_test.go new file mode 100644 index 000000000..071cc230c --- /dev/null +++ b/src/pkg/retention/dep/client_test.go @@ -0,0 +1,138 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dep + +import ( + "testing" + + "github.com/goharbor/harbor/src/chartserver" + jmodels "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/testing/clients" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/repo" +) + +type fakeCoreClient struct { + clients.DumbCoreClient +} + +func (f *fakeCoreClient) ListAllImages(project, repository string) ([]*models.TagResp, error) { + image := &models.TagResp{} + image.Name = "latest" + return []*models.TagResp{image}, nil +} + +func (f *fakeCoreClient) ListAllCharts(project, repository string) ([]*chartserver.ChartVersion, error) { + metadata := &chart.Metadata{ + Name: "1.0", + } + chart := &chartserver.ChartVersion{} + chart.ChartVersion = repo.ChartVersion{ + Metadata: metadata, + } + return []*chartserver.ChartVersion{chart}, nil +} + +type fakeJobserviceClient struct{} + +func (f *fakeJobserviceClient) SubmitJob(*jmodels.JobData) (string, error) { + return "1", nil +} +func (f *fakeJobserviceClient) GetJobLog(uuid string) ([]byte, error) { + return nil, nil +} +func (f *fakeJobserviceClient) PostAction(uuid, action string) error { + return nil +} +func (f *fakeJobserviceClient) GetExecutions(uuid string) ([]job.Stats, error) { + return nil, nil +} + +type clientTestSuite struct { + suite.Suite +} + +func (c *clientTestSuite) TestGetCandidates() { + client := &basicClient{} + client.coreClient = &fakeCoreClient{} + var repository *res.Repository + // nil repository + candidates, err := client.GetCandidates(repository) + require.NotNil(c.T(), err) + + // image repository + repository = &res.Repository{} + repository.Kind = res.Image + repository.Namespace = "library" + repository.Name = "hello-world" + 
candidates, err = client.GetCandidates(repository) + require.Nil(c.T(), err) + assert.Equal(c.T(), 1, len(candidates)) + assert.Equal(c.T(), res.Image, candidates[0].Kind) + assert.Equal(c.T(), "library", candidates[0].Namespace) + assert.Equal(c.T(), "hello-world", candidates[0].Repository) + assert.Equal(c.T(), "latest", candidates[0].Tag) + + /* + // chart repository + repository.Kind = res.Chart + repository.Namespace = "goharbor" + repository.Name = "harbor" + candidates, err = client.GetCandidates(repository) + require.Nil(c.T(), err) + assert.Equal(c.T(), 1, len(candidates)) + assert.Equal(c.T(), res.Chart, candidates[0].Kind) + assert.Equal(c.T(), "goharbor", candidates[0].Namespace) + assert.Equal(c.T(), "1.0", candidates[0].Tag) + */ +} + +func (c *clientTestSuite) TestDelete() { + client := &basicClient{} + client.coreClient = &fakeCoreClient{} + + var candidate *res.Candidate + // nil candidate + err := client.Delete(candidate) + require.NotNil(c.T(), err) + + // image + candidate = &res.Candidate{} + candidate.Kind = res.Image + err = client.Delete(candidate) + require.Nil(c.T(), err) + + /* + // chart + candidate.Kind = res.Chart + err = client.Delete(candidate) + require.Nil(c.T(), err) + */ + + // unsupported type + candidate.Kind = "unsupported" + err = client.Delete(candidate) + require.NotNil(c.T(), err) +} + +func TestClientTestSuite(t *testing.T) { + suite.Run(t, new(clientTestSuite)) +} diff --git a/src/pkg/retention/job.go b/src/pkg/retention/job.go new file mode 100644 index 000000000..533e9dddc --- /dev/null +++ b/src/pkg/retention/job.go @@ -0,0 +1,271 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package retention + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/retention/dep" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/olekukonko/tablewriter" + "github.com/pkg/errors" +) + +const ( + actionMarkRetain = "RETAIN" + actionMarkDeletion = "DEL" + actionMarkError = "ERR" +) + +// Job of running retention process +type Job struct{} + +// MaxFails of the job +func (pj *Job) MaxFails() uint { + return 3 +} + +// ShouldRetry indicates job can be retried if failed +func (pj *Job) ShouldRetry() bool { + return true +} + +// Validate the parameters +func (pj *Job) Validate(params job.Parameters) (err error) { + if _, err = getParamRepo(params); err == nil { + if _, err = getParamMeta(params); err == nil { + _, err = getParamDryRun(params) + } + } + + return +} + +// Run the job +func (pj *Job) Run(ctx job.Context, params job.Parameters) error { + // logger for logging + myLogger := ctx.GetLogger() + + // Parameters have been validated, ignore error checking + repo, _ := getParamRepo(params) + liteMeta, _ := getParamMeta(params) + isDryRun, _ := getParamDryRun(params) + + // Log stage: start + repoPath := fmt.Sprintf("%s/%s", repo.Namespace, repo.Name) + myLogger.Infof("Run retention process.\n Repository: %s \n Rule Algorithm: %s \n Dry Run: %v", 
repoPath, liteMeta.Algorithm, isDryRun) + + // Stop check point 1: + if isStopped(ctx) { + logStop(myLogger) + return nil + } + + // Retrieve all the candidates under the specified repository + allCandidates, err := dep.DefaultClient.GetCandidates(repo) + if err != nil { + return logError(myLogger, err) + } + + // Log stage: load candidates + myLogger.Infof("Load %d candidates from repository %s", len(allCandidates), repoPath) + + // Build the processor + builder := policy.NewBuilder(allCandidates) + processor, err := builder.Build(liteMeta, isDryRun) + if err != nil { + return logError(myLogger, err) + } + + // Stop check point 2: + if isStopped(ctx) { + logStop(myLogger) + return nil + } + + // Run the flow + results, err := processor.Process(allCandidates) + if err != nil { + return logError(myLogger, err) + } + + // Log stage: results with table view + logResults(myLogger, allCandidates, results) + + // Save retain and total num in DB + return saveRetainNum(ctx, results, allCandidates) +} + +func saveRetainNum(ctx job.Context, retained []*res.Result, allCandidates []*res.Candidate) error { + var delNum int + for _, r := range retained { + if r.Error == nil { + delNum++ + } + } + retainObj := struct { + Total int `json:"total"` + Retained int `json:"retained"` + }{ + Total: len(allCandidates), + Retained: len(allCandidates) - delNum, + } + c, err := json.Marshal(retainObj) + if err != nil { + return err + } + _ = ctx.Checkin(string(c)) + return nil +} + +func logResults(logger logger.Interface, all []*res.Candidate, results []*res.Result) { + hash := make(map[string]error, len(results)) + for _, r := range results { + if r.Target != nil { + hash[r.Target.Hash()] = r.Error + } + } + + op := func(art *res.Candidate) string { + if e, exists := hash[art.Hash()]; exists { + if e != nil { + return actionMarkError + } + + return actionMarkDeletion + } + + return actionMarkRetain + } + + var buf bytes.Buffer + + data := make([][]string, len(all)) + + for _, c := range 
all { + row := []string{ + arn(c), + c.Kind, + strings.Join(c.Labels, ","), + t(c.PushedTime), + t(c.PulledTime), + t(c.CreationTime), + op(c), + } + data = append(data, row) + } + + table := tablewriter.NewWriter(&buf) + table.SetHeader([]string{"Artifact", "Kind", "labels", "PushedTime", "PulledTime", "CreatedTime", "Retention"}) + table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) + table.SetCenterSeparator("|") + table.AppendBulk(data) + table.Render() + + logger.Infof("\n%s", buf.String()) + + // log all the concrete errors if have + for _, r := range results { + if r.Error != nil { + logger.Infof("Retention error for artifact %s:%s : %s", r.Target.Kind, arn(r.Target), r.Error) + } + } +} + +func arn(art *res.Candidate) string { + return fmt.Sprintf("%s/%s:%s", art.Namespace, art.Repository, art.Tag) +} + +func t(tm int64) string { + return time.Unix(tm, 0).Format("2006/01/02 15:04:05") +} + +func isStopped(ctx job.Context) (stopped bool) { + cmd, ok := ctx.OPCommand() + stopped = ok && cmd == job.StopCommand + + return +} + +func logStop(logger logger.Interface) { + logger.Info("Retention job is stopped") +} + +func logError(logger logger.Interface, err error) error { + wrappedErr := errors.Wrap(err, "retention job") + logger.Error(wrappedErr) + + return wrappedErr +} + +func getParamDryRun(params job.Parameters) (bool, error) { + v, ok := params[ParamDryRun] + if !ok { + return false, errors.Errorf("missing parameter: %s", ParamDryRun) + } + + dryRun, ok := v.(bool) + if !ok { + return false, errors.Errorf("invalid parameter: %s", ParamDryRun) + } + + return dryRun, nil +} + +func getParamRepo(params job.Parameters) (*res.Repository, error) { + v, ok := params[ParamRepo] + if !ok { + return nil, errors.Errorf("missing parameter: %s", ParamRepo) + } + + repoJSON, ok := v.(string) + if !ok { + return nil, errors.Errorf("invalid parameter: %s", ParamRepo) + } + + repo := &res.Repository{} + if err := 
repo.FromJSON(repoJSON); err != nil { + return nil, errors.Wrap(err, "parse repository from JSON") + } + + return repo, nil +} + +func getParamMeta(params job.Parameters) (*lwp.Metadata, error) { + v, ok := params[ParamMeta] + if !ok { + return nil, errors.Errorf("missing parameter: %s", ParamMeta) + } + + metaJSON, ok := v.(string) + if !ok { + return nil, errors.Errorf("invalid parameter: %s", ParamMeta) + } + + meta := &lwp.Metadata{} + if err := meta.FromJSON(metaJSON); err != nil { + return nil, errors.Wrap(err, "parse retention policy from JSON") + } + + return meta, nil +} diff --git a/src/pkg/retention/job_test.go b/src/pkg/retention/job_test.go new file mode 100644 index 000000000..cd9c137f1 --- /dev/null +++ b/src/pkg/retention/job_test.go @@ -0,0 +1,224 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package retention + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" + "github.com/goharbor/harbor/src/pkg/retention/dep" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// JobTestSuite is test suite for testing job +type JobTestSuite struct { + suite.Suite + + oldClient dep.Client +} + +// TestJob is entry of running JobTestSuite +func TestJob(t *testing.T) { + suite.Run(t, new(JobTestSuite)) +} + +// SetupSuite ... +func (suite *JobTestSuite) SetupSuite() { + suite.oldClient = dep.DefaultClient + dep.DefaultClient = &fakeRetentionClient{} +} + +// TearDownSuite ... 
+func (suite *JobTestSuite) TearDownSuite() { + dep.DefaultClient = suite.oldClient +} + +func (suite *JobTestSuite) TestRunSuccess() { + params := make(job.Parameters) + params[ParamDryRun] = false + repository := &res.Repository{ + Namespace: "library", + Name: "harbor", + Kind: res.Image, + } + repoJSON, err := repository.ToJSON() + require.Nil(suite.T(), err) + params[ParamRepo] = repoJSON + + scopeSelectors := make(map[string][]*rule.Selector) + scopeSelectors["project"] = []*rule.Selector{{ + Kind: doublestar.Kind, + Decoration: doublestar.RepoMatches, + Pattern: "{harbor}", + }} + + ruleParams := make(rule.Parameters) + ruleParams[latestps.ParameterK] = 10 + + meta := &lwp.Metadata{ + Algorithm: policy.AlgorithmOR, + Rules: []*rule.Metadata{ + { + ID: 1, + Priority: 999, + Action: action.Retain, + Template: latestps.TemplateID, + Parameters: ruleParams, + TagSelectors: []*rule.Selector{{ + Kind: label.Kind, + Decoration: label.With, + Pattern: "L3", + }, { + Kind: doublestar.Kind, + Decoration: doublestar.Matches, + Pattern: "**", + }}, + ScopeSelectors: scopeSelectors, + }, + }, + } + metaJSON, err := meta.ToJSON() + require.Nil(suite.T(), err) + params[ParamMeta] = metaJSON + + j := &Job{} + err = j.Validate(params) + require.NoError(suite.T(), err) + + err = j.Run(&fakeJobContext{}, params) + require.NoError(suite.T(), err) +} + +type fakeRetentionClient struct{} + +// GetCandidates ... 
+func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { + return []*res.Candidate{ + { + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "latest", + PushedTime: time.Now().Unix() - 11, + PulledTime: time.Now().Unix() - 2, + CreationTime: time.Now().Unix() - 10, + Labels: []string{"L1", "L2"}, + }, + { + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "dev", + PushedTime: time.Now().Unix() - 10, + PulledTime: time.Now().Unix() - 3, + CreationTime: time.Now().Unix() - 20, + Labels: []string{"L3"}, + }, + }, nil +} + +// Delete ... +func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { + return nil +} + +// SubmitTask ... +func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { + return nil +} + +type fakeLogger struct{} + +// For debuging +func (l *fakeLogger) Debug(v ...interface{}) {} + +// For debuging with format +func (l *fakeLogger) Debugf(format string, v ...interface{}) {} + +// For logging info +func (l *fakeLogger) Info(v ...interface{}) { + fmt.Println(v...) +} + +// For logging info with format +func (l *fakeLogger) Infof(format string, v ...interface{}) { + fmt.Printf(format+"\n", v...) +} + +// For warning +func (l *fakeLogger) Warning(v ...interface{}) {} + +// For warning with format +func (l *fakeLogger) Warningf(format string, v ...interface{}) {} + +// For logging error +func (l *fakeLogger) Error(v ...interface{}) { + fmt.Println(v...) 
+} + +// For logging error with format +func (l *fakeLogger) Errorf(format string, v ...interface{}) { +} + +// For fatal error +func (l *fakeLogger) Fatal(v ...interface{}) {} + +// For fatal error with error +func (l *fakeLogger) Fatalf(format string, v ...interface{}) {} + +type fakeJobContext struct{} + +func (c *fakeJobContext) Build(tracker job.Tracker) (job.Context, error) { + return nil, nil +} + +func (c *fakeJobContext) Get(prop string) (interface{}, bool) { + return nil, false +} + +func (c *fakeJobContext) SystemContext() context.Context { + return context.TODO() +} + +func (c *fakeJobContext) Checkin(status string) error { + fmt.Printf("Check in: %s\n", status) + + return nil +} + +func (c *fakeJobContext) OPCommand() (job.OPCommand, bool) { + return "", false +} + +func (c *fakeJobContext) GetLogger() logger.Interface { + return &fakeLogger{} +} + +func (c *fakeJobContext) Tracker() job.Tracker { + return nil +} diff --git a/src/pkg/retention/launcher.go b/src/pkg/retention/launcher.go new file mode 100644 index 000000000..8bdf91421 --- /dev/null +++ b/src/pkg/retention/launcher.go @@ -0,0 +1,379 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package retention + +import ( + "fmt" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index" + + cjob "github.com/goharbor/harbor/src/common/job" + "github.com/goharbor/harbor/src/common/job/models" + cmodels "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/pkg/project" + "github.com/goharbor/harbor/src/pkg/repository" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" + "github.com/goharbor/harbor/src/pkg/retention/q" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/pkg/errors" +) + +const ( + // ParamRepo ... + ParamRepo = "repository" + // ParamMeta ... + ParamMeta = "liteMeta" + // ParamDryRun ... + ParamDryRun = "dryRun" +) + +// Launcher provides function to launch the async jobs to run retentions based on the provided policy. 
+type Launcher interface { + // Launch async jobs for the retention policy + // A separate job will be launched for each repository + // + // Arguments: + // policy *policy.Metadata: the policy info + // executionID int64 : the execution ID + // isDryRun bool : indicate if it is a dry run + // + // Returns: + // int64 : the count of tasks + // error : common error if any errors occurred + Launch(policy *policy.Metadata, executionID int64, isDryRun bool) (int64, error) + // Stop the jobs for one execution + // + // Arguments: + // executionID int64 : the execution ID + // + // Returns: + // error : common error if any errors occurred + Stop(executionID int64) error +} + +// NewLauncher returns an instance of Launcher +func NewLauncher(projectMgr project.Manager, repositoryMgr repository.Manager, + retentionMgr Manager) Launcher { + return &launcher{ + projectMgr: projectMgr, + repositoryMgr: repositoryMgr, + retentionMgr: retentionMgr, + jobserviceClient: cjob.GlobalClient, + internalCoreURL: config.InternalCoreURL(), + chartServerEnabled: config.WithChartMuseum(), + } +} + +type jobData struct { + TaskID int64 + Repository res.Repository + JobName string + JobParams map[string]interface{} +} + +type launcher struct { + retentionMgr Manager + projectMgr project.Manager + repositoryMgr repository.Manager + jobserviceClient cjob.Client + internalCoreURL string + chartServerEnabled bool +} + +func (l *launcher) Launch(ply *policy.Metadata, executionID int64, isDryRun bool) (int64, error) { + if ply == nil { + return 0, launcherError(fmt.Errorf("the policy is nil")) + } + // no rules, return directly + if len(ply.Rules) == 0 { + log.Debugf("no rules for policy %d, skip", ply.ID) + return 0, nil + } + scope := ply.Scope + if scope == nil { + return 0, launcherError(fmt.Errorf("the scope of policy is nil")) + } + repositoryRules := make(map[res.Repository]*lwp.Metadata, 0) + level := scope.Level + var allProjects []*res.Candidate + var err error + if level == "system" { + 
// get projects + allProjects, err = getProjects(l.projectMgr) + if err != nil { + return 0, launcherError(err) + } + } + + for _, rule := range ply.Rules { + if rule.Disabled { + log.Infof("Policy %d rule %d %s is disabled", ply.ID, rule.ID, rule.Template) + continue + } + projectCandidates := allProjects + switch level { + case "system": + // filter projects according to the project selectors + for _, projectSelector := range rule.ScopeSelectors["project"] { + selector, err := index.Get(projectSelector.Kind, projectSelector.Decoration, + projectSelector.Pattern) + if err != nil { + return 0, launcherError(err) + } + projectCandidates, err = selector.Select(projectCandidates) + if err != nil { + return 0, launcherError(err) + } + } + case "project": + projectCandidates = append(projectCandidates, &res.Candidate{ + NamespaceID: scope.Reference, + }) + } + + var repositoryCandidates []*res.Candidate + // get repositories of projects + for _, projectCandidate := range projectCandidates { + repositories, err := getRepositories(l.projectMgr, l.repositoryMgr, projectCandidate.NamespaceID, l.chartServerEnabled) + if err != nil { + return 0, launcherError(err) + } + for _, repository := range repositories { + repositoryCandidates = append(repositoryCandidates, repository) + } + } + // filter repositories according to the repository selectors + for _, repositorySelector := range rule.ScopeSelectors["repository"] { + selector, err := index.Get(repositorySelector.Kind, repositorySelector.Decoration, + repositorySelector.Pattern) + if err != nil { + return 0, launcherError(err) + } + repositoryCandidates, err = selector.Select(repositoryCandidates) + if err != nil { + return 0, launcherError(err) + } + } + + for _, repositoryCandidate := range repositoryCandidates { + reposit := res.Repository{ + Namespace: repositoryCandidate.Namespace, + Name: repositoryCandidate.Repository, + Kind: repositoryCandidate.Kind, + } + if repositoryRules[reposit] == nil { + 
repositoryRules[reposit] = &lwp.Metadata{ + Algorithm: ply.Algorithm, + } + } + r := rule + repositoryRules[reposit].Rules = append(repositoryRules[reposit].Rules, &r) + } + } + + // create job data list + jobDatas, err := createJobs(repositoryRules, isDryRun) + if err != nil { + return 0, launcherError(err) + } + + // no jobs, return directly + if len(jobDatas) == 0 { + log.Debugf("no candidates for policy %d, skip", ply.ID) + return 0, nil + } + + // create task records in database + if err = l.createTasks(executionID, jobDatas); err != nil { + return 0, launcherError(err) + } + + // submit jobs to jobservice + if err = l.submitJobs(jobDatas); err != nil { + return 0, launcherError(err) + } + + return int64(len(jobDatas)), nil +} + +func createJobs(repositoryRules map[res.Repository]*lwp.Metadata, isDryRun bool) ([]*jobData, error) { + jobDatas := []*jobData{} + for repository, policy := range repositoryRules { + jobData := &jobData{ + Repository: repository, + JobName: job.Retention, + JobParams: make(map[string]interface{}, 3), + } + // set dry run + jobData.JobParams[ParamDryRun] = isDryRun + // set repository + repoJSON, err := repository.ToJSON() + if err != nil { + return nil, err + } + jobData.JobParams[ParamRepo] = repoJSON + // set retention policy + policyJSON, err := policy.ToJSON() + if err != nil { + return nil, err + } + jobData.JobParams[ParamMeta] = policyJSON + jobDatas = append(jobDatas, jobData) + } + return jobDatas, nil +} + +// create task records in database +func (l *launcher) createTasks(executionID int64, jobDatas []*jobData) error { + now := time.Now() + for _, jobData := range jobDatas { + taskID, err := l.retentionMgr.CreateTask(&Task{ + ExecutionID: executionID, + Repository: jobData.Repository.Name, + StartTime: now, + }) + if err != nil { + return err + } + jobData.TaskID = taskID + } + return nil +} + +// submit jobs to jobservice +func (l *launcher) submitJobs(jobDatas []*jobData) error { + allFailed := true + for _, 
jobData := range jobDatas { + task := &Task{ + ID: jobData.TaskID, + } + props := []string{"Status"} + j := &models.JobData{ + Name: jobData.JobName, + Metadata: &models.JobMetadata{ + JobKind: job.KindGeneric, + }, + StatusHook: fmt.Sprintf("%s/service/notifications/jobs/retention/task/%d", l.internalCoreURL, jobData.TaskID), + Parameters: jobData.JobParams, + } + // Submit job + jobID, err := l.jobserviceClient.SubmitJob(j) + if err != nil { + log.Error(launcherError(fmt.Errorf("failed to submit task %d: %v", jobData.TaskID, err))) + task.Status = cmodels.JobError + task.EndTime = time.Now() + props = append(props, "EndTime") + } else { + allFailed = false + task.JobID = jobID + task.Status = cmodels.JobPending + props = append(props, "JobID") + } + if err = l.retentionMgr.UpdateTask(task, props...); err != nil { + log.Errorf("failed to update the status of task %d: %v", task.ID, err) + } + } + if allFailed { + return launcherError(fmt.Errorf("all tasks failed")) + } + return nil +} + +func (l *launcher) Stop(executionID int64) error { + if executionID <= 0 { + return launcherError(fmt.Errorf("invalid execution ID: %d", executionID)) + } + tasks, err := l.retentionMgr.ListTasks(&q.TaskQuery{ + ExecutionID: executionID, + }) + if err != nil { + return err + } + for _, task := range tasks { + if err = l.jobserviceClient.PostAction(task.JobID, cjob.JobActionStop); err != nil { + log.Errorf("failed to stop task %d, job ID: %s : %v", task.ID, task.JobID, err) + continue + } + } + return nil +} + +func launcherError(err error) error { + return errors.Wrap(err, "launcher") +} + +func getProjects(projectMgr project.Manager) ([]*res.Candidate, error) { + projects, err := projectMgr.List() + if err != nil { + return nil, err + } + var candidates []*res.Candidate + for _, pro := range projects { + candidates = append(candidates, &res.Candidate{ + NamespaceID: pro.ProjectID, + Namespace: pro.Name, + }) + } + return candidates, nil +} + +func getRepositories(projectMgr 
project.Manager, repositoryMgr repository.Manager, + projectID int64, chartServerEnabled bool) ([]*res.Candidate, error) { + var candidates []*res.Candidate + /* + pro, err := projectMgr.Get(projectID) + if err != nil { + return nil, err + } + */ + // get image repositories + imageRepositories, err := repositoryMgr.ListImageRepositories(projectID) + if err != nil { + return nil, err + } + for _, r := range imageRepositories { + namespace, repo := utils.ParseRepository(r.Name) + candidates = append(candidates, &res.Candidate{ + Namespace: namespace, + Repository: repo, + Kind: "image", + }) + } + // currently, doesn't support retention for chart + /* + if chartServerEnabled { + // get chart repositories when chart server is enabled + chartRepositories, err := repositoryMgr.ListChartRepositories(projectID) + if err != nil { + return nil, err + } + for _, r := range chartRepositories { + candidates = append(candidates, &res.Candidate{ + Namespace: pro.Name, + Repository: r.Name, + Kind: "chart", + }) + } + } + */ + + return candidates, nil +} diff --git a/src/pkg/retention/launcher_test.go b/src/pkg/retention/launcher_test.go new file mode 100644 index 000000000..27ce757b6 --- /dev/null +++ b/src/pkg/retention/launcher_test.go @@ -0,0 +1,301 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package retention + +import ( + "fmt" + "testing" + + "github.com/goharbor/harbor/src/chartserver" + "github.com/goharbor/harbor/src/common/job" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/pkg/project" + "github.com/goharbor/harbor/src/pkg/repository" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/q" + _ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" + hjob "github.com/goharbor/harbor/src/testing/job" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type fakeProjectManager struct { + projects []*models.Project +} + +func (f *fakeProjectManager) List(...*models.ProjectQueryParam) ([]*models.Project, error) { + return f.projects, nil +} +func (f *fakeProjectManager) Get(idOrName interface{}) (*models.Project, error) { + id, ok := idOrName.(int64) + if ok { + for _, pro := range f.projects { + if pro.ProjectID == id { + return pro, nil + } + } + return nil, nil + } + name, ok := idOrName.(string) + if ok { + for _, pro := range f.projects { + if pro.Name == name { + return pro, nil + } + } + return nil, nil + } + return nil, fmt.Errorf("invalid parameter: %v, should be ID(int64) or name(string)", idOrName) +} + +type fakeRepositoryManager struct { + imageRepositories []*models.RepoRecord + chartRepositories []*chartserver.ChartInfo +} + +func (f *fakeRepositoryManager) ListImageRepositories(projectID int64) ([]*models.RepoRecord, error) { + return f.imageRepositories, nil +} +func (f *fakeRepositoryManager) ListChartRepositories(projectID int64) ([]*chartserver.ChartInfo, error) { + return f.chartRepositories, nil +} + +type fakeRetentionManager struct{} + +func (f *fakeRetentionManager) GetTotalOfRetentionExecs(policyID int64) (int64, error) { + return 0, nil +} + +func (f *fakeRetentionManager) 
GetTotalOfTasks(executionID int64) (int64, error) { + return 0, nil +} + +func (f *fakeRetentionManager) CreatePolicy(p *policy.Metadata) (int64, error) { + return 0, nil +} +func (f *fakeRetentionManager) UpdatePolicy(p *policy.Metadata) error { + return nil +} +func (f *fakeRetentionManager) DeletePolicyAndExec(ID int64) error { + return nil +} +func (f *fakeRetentionManager) GetPolicy(ID int64) (*policy.Metadata, error) { + return nil, nil +} +func (f *fakeRetentionManager) CreateExecution(execution *Execution) (int64, error) { + return 0, nil +} +func (f *fakeRetentionManager) UpdateExecution(execution *Execution) error { + return nil +} +func (f *fakeRetentionManager) GetExecution(eid int64) (*Execution, error) { + return nil, nil +} +func (f *fakeRetentionManager) DeleteExecution(eid int64) error { + return nil +} +func (f *fakeRetentionManager) ListTasks(query ...*q.TaskQuery) ([]*Task, error) { + return []*Task{ + { + ID: 1, + ExecutionID: 1, + JobID: "1", + }, + }, nil +} +func (f *fakeRetentionManager) GetTask(taskID int64) (*Task, error) { + return nil, nil +} +func (f *fakeRetentionManager) CreateTask(task *Task) (int64, error) { + return 0, nil +} +func (f *fakeRetentionManager) UpdateTask(task *Task, cols ...string) error { + return nil +} +func (f *fakeRetentionManager) UpdateTaskStatus(int64, string) error { + return nil +} +func (f *fakeRetentionManager) GetTaskLog(taskID int64) ([]byte, error) { + return nil, nil +} +func (f *fakeRetentionManager) ListExecutions(policyID int64, query *q.Query) ([]*Execution, error) { + return nil, nil +} +func (f *fakeRetentionManager) AppendHistory(history *History) (int64, error) { + return 0, nil +} +func (f *fakeRetentionManager) ListHistories(executionID int64, query *q.Query) ([]*History, error) { + return nil, nil +} + +type launchTestSuite struct { + suite.Suite + projectMgr project.Manager + repositoryMgr repository.Manager + retentionMgr Manager + jobserviceClient job.Client +} + +func (l 
*launchTestSuite) SetupTest() { + pro1 := &models.Project{ + ProjectID: 1, + Name: "library", + } + pro2 := &models.Project{ + ProjectID: 2, + Name: "test", + } + l.projectMgr = &fakeProjectManager{ + projects: []*models.Project{ + pro1, pro2, + }} + l.repositoryMgr = &fakeRepositoryManager{ + imageRepositories: []*models.RepoRecord{ + { + Name: "library/image", + }, + { + Name: "test/image", + }, + }, + chartRepositories: []*chartserver.ChartInfo{ + { + Name: "chart", + }, + }, + } + l.retentionMgr = &fakeRetentionManager{} + l.jobserviceClient = &hjob.MockJobClient{ + JobUUID: []string{"1"}, + } +} + +func (l *launchTestSuite) TestGetProjects() { + projects, err := getProjects(l.projectMgr) + require.Nil(l.T(), err) + assert.Equal(l.T(), 2, len(projects)) + assert.Equal(l.T(), int64(1), projects[0].NamespaceID) + assert.Equal(l.T(), "library", projects[0].Namespace) +} + +func (l *launchTestSuite) TestGetRepositories() { + repositories, err := getRepositories(l.projectMgr, l.repositoryMgr, 1, true) + require.Nil(l.T(), err) + assert.Equal(l.T(), 2, len(repositories)) + assert.Equal(l.T(), "library", repositories[0].Namespace) + assert.Equal(l.T(), "image", repositories[0].Repository) + assert.Equal(l.T(), "image", repositories[0].Kind) +} + +func (l *launchTestSuite) TestLaunch() { + launcher := &launcher{ + projectMgr: l.projectMgr, + repositoryMgr: l.repositoryMgr, + retentionMgr: l.retentionMgr, + jobserviceClient: l.jobserviceClient, + chartServerEnabled: true, + } + + var ply *policy.Metadata + // nil policy + n, err := launcher.Launch(ply, 1, false) + require.NotNil(l.T(), err) + + // nil rules + ply = &policy.Metadata{} + n, err = launcher.Launch(ply, 1, false) + require.Nil(l.T(), err) + assert.Equal(l.T(), int64(0), n) + + // nil scope + ply = &policy.Metadata{ + Rules: []rule.Metadata{ + {}, + }, + } + _, err = launcher.Launch(ply, 1, false) + require.NotNil(l.T(), err) + + // system scope + ply = &policy.Metadata{ + Scope: &policy.Scope{ + Level: 
"system", + }, + Rules: []rule.Metadata{ + { + ScopeSelectors: map[string][]*rule.Selector{ + "project": { + { + Kind: "doublestar", + Decoration: "nsMatches", + Pattern: "library", + }, + }, + "repository": { + { + Kind: "doublestar", + Decoration: "repoMatches", + Pattern: "**", + }, + }, + }, + }, + { + Disabled: true, + ScopeSelectors: map[string][]*rule.Selector{ + "project": { + { + Kind: "doublestar", + Decoration: "nsMatches", + Pattern: "library1", + }, + }, + "repository": { + { + Kind: "doublestar", + Decoration: "repoMatches", + Pattern: "**", + }, + }, + }, + }, + }, + } + n, err = launcher.Launch(ply, 1, false) + require.Nil(l.T(), err) + assert.Equal(l.T(), int64(2), n) +} + +func (l *launchTestSuite) TestStop() { + t := l.T() + launcher := &launcher{ + projectMgr: l.projectMgr, + repositoryMgr: l.repositoryMgr, + retentionMgr: l.retentionMgr, + jobserviceClient: l.jobserviceClient, + } + // invalid execution ID + err := launcher.Stop(0) + require.NotNil(t, err) + + err = launcher.Stop(1) + require.Nil(t, err) +} + +func TestLaunchTestSuite(t *testing.T) { + suite.Run(t, new(launchTestSuite)) +} diff --git a/src/pkg/retention/manager.go b/src/pkg/retention/manager.go new file mode 100644 index 000000000..c93f92256 --- /dev/null +++ b/src/pkg/retention/manager.go @@ -0,0 +1,310 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package retention + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/astaxie/beego/orm" + cjob "github.com/goharbor/harbor/src/common/job" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/retention/dao" + "github.com/goharbor/harbor/src/pkg/retention/dao/models" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/q" +) + +// Manager defines operations of managing policy +type Manager interface { + // Create new policy and return ID + CreatePolicy(p *policy.Metadata) (int64, error) + // Update the existing policy + // Full update + UpdatePolicy(p *policy.Metadata) error + // Delete the specified policy + // No actual use so far + DeletePolicyAndExec(ID int64) error + // Get the specified policy + GetPolicy(ID int64) (*policy.Metadata, error) + // Create a new retention execution + CreateExecution(execution *Execution) (int64, error) + // Delete the specified retention execution + DeleteExecution(int64) error + // Get the specified execution + GetExecution(eid int64) (*Execution, error) + // List executions + ListExecutions(policyID int64, query *q.Query) ([]*Execution, error) + // GetTotalOfRetentionExecs Count Retention Executions + GetTotalOfRetentionExecs(policyID int64) (int64, error) + // List tasks histories + ListTasks(query ...*q.TaskQuery) ([]*Task, error) + // GetTotalOfTasks Count Tasks + GetTotalOfTasks(executionID int64) (int64, error) + // Create a new retention task + CreateTask(task *Task) (int64, error) + // Update the specified task + UpdateTask(task *Task, cols ...string) error + // Update the status of the specified task + // The status is updated only when it is behind the one stored + // in the database. + // e.g. 
if the status is running but the status stored + // in database is failed, the updating doesn't take effect + UpdateTaskStatus(taskID int64, status string) error + // Get the task specified by the task ID + GetTask(taskID int64) (*Task, error) + // Get the log of the specified task + GetTaskLog(taskID int64) ([]byte, error) +} + +// DefaultManager ... +type DefaultManager struct { +} + +// CreatePolicy Create Policy +func (d *DefaultManager) CreatePolicy(p *policy.Metadata) (int64, error) { + p1 := &models.RetentionPolicy{} + p1.ScopeLevel = p.Scope.Level + p1.ScopeReference = p.Scope.Reference + p1.TriggerKind = p.Trigger.Kind + data, _ := json.Marshal(p) + p1.Data = string(data) + p1.CreateTime = time.Now() + p1.UpdateTime = p1.CreateTime + return dao.CreatePolicy(p1) +} + +// UpdatePolicy Update Policy +func (d *DefaultManager) UpdatePolicy(p *policy.Metadata) error { + p1 := &models.RetentionPolicy{} + p1.ID = p.ID + p1.ScopeLevel = p.Scope.Level + p1.ScopeReference = p.Scope.Reference + p1.TriggerKind = p.Trigger.Kind + p.ID = 0 + data, _ := json.Marshal(p) + p.ID = p1.ID + p1.Data = string(data) + p1.UpdateTime = time.Now() + return dao.UpdatePolicy(p1, "scope_level", "trigger_kind", "data", "update_time") +} + +// DeletePolicyAndExec Delete Policy +func (d *DefaultManager) DeletePolicyAndExec(id int64) error { + return dao.DeletePolicyAndExec(id) +} + +// GetPolicy Get Policy +func (d *DefaultManager) GetPolicy(id int64) (*policy.Metadata, error) { + p1, err := dao.GetPolicy(id) + if err != nil { + if err == orm.ErrNoRows { + return nil, nil + } + return nil, err + } + p := &policy.Metadata{} + if err = json.Unmarshal([]byte(p1.Data), p); err != nil { + return nil, err + } + p.ID = id + if p.Trigger.Settings != nil { + if _, ok := p.Trigger.References[policy.TriggerReferencesJobid]; ok { + p.Trigger.References[policy.TriggerReferencesJobid] = int64(p.Trigger.References[policy.TriggerReferencesJobid].(float64)) + } + } + return p, nil +} + +// CreateExecution 
Create Execution +func (d *DefaultManager) CreateExecution(execution *Execution) (int64, error) { + exec := &models.RetentionExecution{} + exec.PolicyID = execution.PolicyID + exec.StartTime = time.Now() + exec.DryRun = execution.DryRun + exec.Trigger = execution.Trigger + return dao.CreateExecution(exec) +} + +// DeleteExecution Delete Execution +func (d *DefaultManager) DeleteExecution(eid int64) error { + return dao.DeleteExecution(eid) +} + +// ListExecutions List Executions +func (d *DefaultManager) ListExecutions(policyID int64, query *q.Query) ([]*Execution, error) { + execs, err := dao.ListExecutions(policyID, query) + if err != nil { + if err == orm.ErrNoRows { + return nil, nil + } + return nil, err + } + var execs1 []*Execution + for _, e := range execs { + e1 := &Execution{} + e1.ID = e.ID + e1.PolicyID = e.PolicyID + e1.Status = e.Status + e1.StartTime = e.StartTime + e1.EndTime = e.EndTime + e1.DryRun = e.DryRun + execs1 = append(execs1, e1) + } + return execs1, nil +} + +// GetTotalOfRetentionExecs Count Executions +func (d *DefaultManager) GetTotalOfRetentionExecs(policyID int64) (int64, error) { + return dao.GetTotalOfRetentionExecs(policyID) +} + +// GetExecution Get Execution +func (d *DefaultManager) GetExecution(eid int64) (*Execution, error) { + e, err := dao.GetExecution(eid) + if err != nil { + return nil, err + } + e1 := &Execution{} + e1.ID = e.ID + e1.PolicyID = e.PolicyID + e1.Status = e.Status + e1.StartTime = e.StartTime + e1.EndTime = e.EndTime + e1.DryRun = e.DryRun + return e1, nil +} + +// CreateTask creates task record +func (d *DefaultManager) CreateTask(task *Task) (int64, error) { + if task == nil { + return 0, errors.New("nil task") + } + t := &models.RetentionTask{ + ExecutionID: task.ExecutionID, + Repository: task.Repository, + JobID: task.JobID, + Status: task.Status, + StartTime: task.StartTime, + EndTime: task.EndTime, + Total: task.Total, + Retained: task.Retained, + } + return dao.CreateTask(t) +} + +// ListTasks lists 
tasks according to the query +func (d *DefaultManager) ListTasks(query ...*q.TaskQuery) ([]*Task, error) { + ts, err := dao.ListTask(query...) + if err != nil { + if err == orm.ErrNoRows { + return nil, nil + } + return nil, err + } + tasks := make([]*Task, 0) + for _, t := range ts { + tasks = append(tasks, &Task{ + ID: t.ID, + ExecutionID: t.ExecutionID, + Repository: t.Repository, + JobID: t.JobID, + Status: t.Status, + StatusCode: t.StatusCode, + StartTime: t.StartTime, + EndTime: t.EndTime, + Total: t.Total, + Retained: t.Retained, + }) + } + return tasks, nil +} + +// GetTotalOfTasks Count tasks +func (d *DefaultManager) GetTotalOfTasks(executionID int64) (int64, error) { + return dao.GetTotalOfTasks(executionID) +} + +// UpdateTask updates the task +func (d *DefaultManager) UpdateTask(task *Task, cols ...string) error { + if task == nil { + return errors.New("nil task") + } + if task.ID <= 0 { + return fmt.Errorf("invalid task ID: %d", task.ID) + } + return dao.UpdateTask(&models.RetentionTask{ + ID: task.ID, + ExecutionID: task.ExecutionID, + Repository: task.Repository, + JobID: task.JobID, + Status: task.Status, + StartTime: task.StartTime, + EndTime: task.EndTime, + Total: task.Total, + Retained: task.Retained, + }, cols...) 
+} + +// UpdateTaskStatus updates the status of the specified task +func (d *DefaultManager) UpdateTaskStatus(taskID int64, status string) error { + if taskID <= 0 { + return fmt.Errorf("invalid task ID: %d", taskID) + } + st := job.Status(status) + return dao.UpdateTaskStatus(taskID, status, st.Code()) +} + +// GetTask returns the task specified by task ID +func (d *DefaultManager) GetTask(taskID int64) (*Task, error) { + if taskID <= 0 { + return nil, fmt.Errorf("invalid task ID: %d", taskID) + } + task, err := dao.GetTask(taskID) + if err != nil { + return nil, err + } + return &Task{ + ID: task.ID, + ExecutionID: task.ExecutionID, + Repository: task.Repository, + JobID: task.JobID, + Status: task.Status, + StatusCode: task.StatusCode, + StartTime: task.StartTime, + EndTime: task.EndTime, + Total: task.Total, + Retained: task.Retained, + }, nil +} + +// GetTaskLog gets the logs of task +func (d *DefaultManager) GetTaskLog(taskID int64) ([]byte, error) { + task, err := d.GetTask(taskID) + if err != nil { + return nil, err + } + if task == nil { + return nil, fmt.Errorf("task %d not found", taskID) + } + return cjob.GlobalClient.GetJobLog(task.JobID) +} + +// NewManager ... 
+func NewManager() Manager { + return &DefaultManager{} +} diff --git a/src/pkg/retention/manager_test.go b/src/pkg/retention/manager_test.go new file mode 100644 index 000000000..cbcbf5f10 --- /dev/null +++ b/src/pkg/retention/manager_test.go @@ -0,0 +1,221 @@ +package retention + +import ( + "os" + "testing" + "time" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/job" + jjob "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/pkg/retention/policy" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/q" + tjob "github.com/goharbor/harbor/src/testing/job" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { + dao.PrepareTestForPostgresSQL() + os.Exit(m.Run()) +} + +func TestPolicy(t *testing.T) { + m := NewManager() + p1 := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + + id, err := m.CreatePolicy(p1) + assert.Nil(t, err) + assert.True(t, id > 0) + + p1, err = m.GetPolicy(id) + assert.Nil(t, err) + assert.EqualValues(t, "project", p1.Scope.Level) + assert.True(t, p1.ID > 0) + + p1.Scope.Level = "test" + err = m.UpdatePolicy(p1) + assert.Nil(t, err) + p1, err = m.GetPolicy(id) + assert.Nil(t, err) + 
assert.EqualValues(t, "test", p1.Scope.Level) + + err = m.DeletePolicyAndExec(id) + assert.Nil(t, err) + + p1, err = m.GetPolicy(id) + assert.Nil(t, err) + assert.Nil(t, p1) +} + +func TestExecution(t *testing.T) { + m := NewManager() + p1 := &policy.Metadata{ + Algorithm: "or", + Rules: []rule.Metadata{ + { + ID: 1, + Priority: 1, + Template: "recentXdays", + Parameters: rule.Parameters{ + "num": 10, + }, + TagSelectors: []*rule.Selector{ + { + Kind: "label", + Decoration: "with", + Pattern: "latest", + }, + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: "release-[\\d\\.]+", + }, + }, + ScopeSelectors: map[string][]*rule.Selector{ + "repository": { + { + Kind: "regularExpression", + Decoration: "matches", + Pattern: ".+", + }, + }, + }, + }, + }, + Trigger: &policy.Trigger{ + Kind: "Schedule", + Settings: map[string]interface{}{ + "cron": "* 22 11 * * *", + }, + }, + Scope: &policy.Scope{ + Level: "project", + Reference: 1, + }, + } + + policyID, err := m.CreatePolicy(p1) + assert.Nil(t, err) + assert.True(t, policyID > 0) + + e1 := &Execution{ + PolicyID: policyID, + StartTime: time.Now(), + Trigger: ExecutionTriggerManual, + DryRun: false, + } + id, err := m.CreateExecution(e1) + assert.Nil(t, err) + assert.True(t, id > 0) + + e1, err = m.GetExecution(id) + assert.Nil(t, err) + assert.NotNil(t, e1) + assert.EqualValues(t, id, e1.ID) + + es, err := m.ListExecutions(policyID, nil) + assert.Nil(t, err) + assert.EqualValues(t, 1, len(es)) + + err = m.DeleteExecution(id) + assert.Nil(t, err) +} + +func TestTask(t *testing.T) { + m := NewManager() + task := &Task{ + ExecutionID: 1, + JobID: "1", + Status: jjob.PendingStatus.String(), + StatusCode: jjob.PendingStatus.Code(), + Total: 0, + StartTime: time.Now(), + } + // create + id, err := m.CreateTask(task) + require.Nil(t, err) + + // get + tk, err := m.GetTask(id) + require.Nil(t, err) + assert.EqualValues(t, 1, tk.ExecutionID) + + // update + task.ID = id + task.Total = 1 + err = 
m.UpdateTask(task, "Total") + require.Nil(t, err) + + // update status to success which is a final status + err = m.UpdateTaskStatus(id, jjob.SuccessStatus.String()) + require.Nil(t, err) + + // try to update status to running, as the status has already + // been updated to a final status, this updating shouldn't take effect + err = m.UpdateTaskStatus(id, jjob.RunningStatus.String()) + require.Nil(t, err) + + // list + tasks, err := m.ListTasks(&q.TaskQuery{ + ExecutionID: 1, + }) + require.Nil(t, err) + require.Equal(t, 1, len(tasks)) + assert.Equal(t, int64(1), tasks[0].ExecutionID) + assert.Equal(t, 1, tasks[0].Total) + assert.Equal(t, jjob.SuccessStatus.String(), tasks[0].Status) + assert.Equal(t, jjob.SuccessStatus.Code(), tasks[0].StatusCode) + + // get task log + job.GlobalClient = &tjob.MockJobClient{ + JobUUID: []string{"1"}, + } + data, err := m.GetTaskLog(task.ID) + require.Nil(t, err) + assert.Equal(t, "some log", string(data)) +} diff --git a/src/pkg/retention/models.go b/src/pkg/retention/models.go new file mode 100644 index 000000000..1e4219937 --- /dev/null +++ b/src/pkg/retention/models.go @@ -0,0 +1,69 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package retention + +import "time" + +// const definitions +const ( + ExecutionStatusInProgress string = "InProgress" + ExecutionStatusSucceed string = "Succeed" + ExecutionStatusFailed string = "Failed" + ExecutionStatusStopped string = "Stopped" + + CandidateKindImage string = "image" + CandidateKindChart string = "chart" + + ExecutionTriggerManual string = "Manual" + ExecutionTriggerSchedule string = "Schedule" +) + +// Execution of retention +type Execution struct { + ID int64 `json:"id"` + PolicyID int64 `json:"policy_id"` + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time,omitempty"` + Status string `json:"status"` + Trigger string `json:"Trigger"` + DryRun bool `json:"dry_run"` +} + +// Task of retention +type Task struct { + ID int64 `json:"id"` + ExecutionID int64 `json:"execution_id"` + Repository string `json:"repository"` + JobID string `json:"job_id"` + Status string `json:"status"` + StatusCode int `json:"status_code"` + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + Total int `json:"total"` + Retained int `json:"retained"` +} + +// History of retention +type History struct { + ID int64 `json:"id,omitempty"` + ExecutionID int64 `json:"execution_id"` + Rule struct { + ID int `json:"id"` + DisplayText string `json:"display_text"` + } `json:"rule_id"` + // full path: :ns/:repo:tag + Artifact string `json:"tag"` + Timestamp time.Time `json:"timestamp"` +} diff --git a/src/pkg/retention/policy/action/index/index.go b/src/pkg/retention/policy/action/index/index.go new file mode 100644 index 000000000..3b371d5df --- /dev/null +++ b/src/pkg/retention/policy/action/index/index.go @@ -0,0 +1,59 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "sync" + + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/pkg/errors" +) + +// index for keeping the mapping action and its performer +var index sync.Map + +func init() { + // Register retain action + Register(action.Retain, action.NewRetainAction) +} + +// Register the performer with the corresponding action +func Register(action string, factory action.PerformerFactory) { + if len(action) == 0 || factory == nil { + // do nothing + return + } + + index.Store(action, factory) +} + +// Get performer with the provided action +func Get(act string, params interface{}, isDryRun bool) (action.Performer, error) { + if len(act) == 0 { + return nil, errors.New("empty action") + } + + v, ok := index.Load(act) + if !ok { + return nil, errors.Errorf("action %s is not registered", act) + } + + factory, ok := v.(action.PerformerFactory) + if !ok { + return nil, errors.Errorf("invalid action performer registered for action %s", act) + } + + return factory(params, isDryRun), nil +} diff --git a/src/pkg/retention/policy/action/index/index_test.go b/src/pkg/retention/policy/action/index/index_test.go new file mode 100644 index 000000000..f9d4f57e5 --- /dev/null +++ b/src/pkg/retention/policy/action/index/index_test.go @@ -0,0 +1,95 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "testing" + "time" + + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// IndexTestSuite tests the rule index +type IndexTestSuite struct { + suite.Suite + + candidates []*res.Candidate +} + +// TestIndexEntry is entry of IndexTestSuite +func TestIndexEntry(t *testing.T) { + suite.Run(t, new(IndexTestSuite)) +} + +// SetupSuite ... +func (suite *IndexTestSuite) SetupSuite() { + Register("fakeAction", newFakePerformer) + + suite.candidates = []*res.Candidate{{ + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "latest", + PushedTime: time.Now().Unix(), + Labels: []string{"L1", "L2"}, + }} +} + +// TestRegister tests register +func (suite *IndexTestSuite) TestGet() { + p, err := Get("fakeAction", nil, false) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), p) + + results, err := p.Perform(suite.candidates) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(results)) + assert.Condition(suite.T(), func() (success bool) { + r := results[0] + success = r.Target != nil && + r.Error == nil && + r.Target.Repository == "harbor" && + r.Target.Tag == "latest" + + return + }) +} + +type fakePerformer struct { + parameters interface{} + isDryRun bool +} + +// Perform the artifacts +func (p *fakePerformer) Perform(candidates []*res.Candidate) (results []*res.Result, err error) { + for _, c := range 
candidates { + results = append(results, &res.Result{ + Target: c, + }) + } + + return +} + +func newFakePerformer(params interface{}, isDryRun bool) action.Performer { + return &fakePerformer{ + parameters: params, + isDryRun: isDryRun, + } +} diff --git a/src/pkg/retention/policy/action/performer.go b/src/pkg/retention/policy/action/performer.go new file mode 100644 index 000000000..72d34d612 --- /dev/null +++ b/src/pkg/retention/policy/action/performer.go @@ -0,0 +1,94 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package action + +import ( + "github.com/goharbor/harbor/src/pkg/retention/dep" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // Retain artifacts + Retain = "retain" +) + +// Performer performs the related actions targeting the candidates +type Performer interface { + // Perform the action + // + // Arguments: + // candidates []*res.Candidate : the targets to perform + // + // Returns: + // []*res.Result : result infos + // error : common error if any errors occurred + Perform(candidates []*res.Candidate) ([]*res.Result, error) +} + +// PerformerFactory is factory method for creating Performer +type PerformerFactory func(params interface{}, isDryRun bool) Performer + +// retainAction make sure all the candidates will be retained and others will be cleared +type retainAction struct { + all []*res.Candidate + // Indicate if it is a dry run + isDryRun bool +} + +// Perform the action +func (ra *retainAction) Perform(candidates []*res.Candidate) (results []*res.Result, err error) { + retained := make(map[string]bool) + for _, c := range candidates { + retained[c.Hash()] = true + } + + // start to delete + if len(ra.all) > 0 { + for _, art := range ra.all { + if _, ok := retained[art.Hash()]; !ok { + result := &res.Result{ + Target: art, + } + + if !ra.isDryRun { + if err := dep.DefaultClient.Delete(art); err != nil { + result.Error = err + } + } + + results = append(results, result) + } + } + } + + return +} + +// NewRetainAction is factory method for RetainAction +func NewRetainAction(params interface{}, isDryRun bool) Performer { + if params != nil { + if all, ok := params.([]*res.Candidate); ok { + return &retainAction{ + all: all, + isDryRun: isDryRun, + } + } + } + + return &retainAction{ + all: make([]*res.Candidate, 0), + isDryRun: isDryRun, + } +} diff --git a/src/pkg/retention/policy/action/performer_test.go b/src/pkg/retention/policy/action/performer_test.go new file mode 100644 index 000000000..2f6a6be15 --- /dev/null +++ 
b/src/pkg/retention/policy/action/performer_test.go @@ -0,0 +1,112 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package action + +import ( + "testing" + "time" + + "github.com/goharbor/harbor/src/pkg/retention/dep" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// TestPerformerSuite tests the performer related function +type TestPerformerSuite struct { + suite.Suite + + oldClient dep.Client + all []*res.Candidate +} + +// TestPerformer is the entry of the TestPerformerSuite +func TestPerformer(t *testing.T) { + suite.Run(t, new(TestPerformerSuite)) +} + +// SetupSuite ... +func (suite *TestPerformerSuite) SetupSuite() { + suite.all = []*res.Candidate{ + { + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "latest", + PushedTime: time.Now().Unix(), + Labels: []string{"L1", "L2"}, + }, + { + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "dev", + PushedTime: time.Now().Unix(), + Labels: []string{"L3"}, + }, + } + + suite.oldClient = dep.DefaultClient + dep.DefaultClient = &fakeRetentionClient{} +} + +// TearDownSuite ... 
+func (suite *TestPerformerSuite) TearDownSuite() { + dep.DefaultClient = suite.oldClient +} + +// TestPerform tests Perform action +func (suite *TestPerformerSuite) TestPerform() { + p := &retainAction{ + all: suite.all, + } + + candidates := []*res.Candidate{ + { + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "latest", + PushedTime: time.Now().Unix(), + Labels: []string{"L1", "L2"}, + }, + } + + results, err := p.Perform(candidates) + require.NoError(suite.T(), err) + require.Equal(suite.T(), 1, len(results)) + require.NotNil(suite.T(), results[0].Target) + assert.NoError(suite.T(), results[0].Error) + assert.Equal(suite.T(), "dev", results[0].Target.Tag) +} + +type fakeRetentionClient struct{} + +// GetCandidates ... +func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { + return nil, errors.New("not implemented") +} + +// Delete ... +func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { + return nil +} + +// DeleteRepository ... +func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { + panic("implement me") +} diff --git a/src/pkg/retention/policy/alg/index/index.go b/src/pkg/retention/policy/alg/index/index.go new file mode 100644 index 000000000..ad5a6a7f2 --- /dev/null +++ b/src/pkg/retention/policy/alg/index/index.go @@ -0,0 +1,57 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "sync" + + "github.com/goharbor/harbor/src/pkg/retention/policy/alg" + "github.com/goharbor/harbor/src/pkg/retention/policy/alg/or" + "github.com/pkg/errors" +) + +const ( + // AlgorithmOR for || algorithm + AlgorithmOR = "or" +) + +// index for keeping the mapping between algorithm and its processor +var index sync.Map + +func init() { + // Register or + Register(AlgorithmOR, or.New) +} + +// Register processor with the algorithm +func Register(algorithm string, processor alg.Factory) { + if len(algorithm) > 0 && processor != nil { + index.Store(algorithm, processor) + } +} + +// Get Processor +func Get(algorithm string, params []*alg.Parameter) (alg.Processor, error) { + v, ok := index.Load(algorithm) + if !ok { + return nil, errors.Errorf("no processor registered with algorithm: %s", algorithm) + } + + if fac, ok := v.(alg.Factory); ok { + return fac(params), nil + } + + return nil, errors.Errorf("no valid processor registered for algorithm: %s", algorithm) +} diff --git a/src/pkg/retention/policy/alg/or/processor.go b/src/pkg/retention/policy/alg/or/processor.go new file mode 100644 index 000000000..623e4f050 --- /dev/null +++ b/src/pkg/retention/policy/alg/or/processor.go @@ -0,0 +1,216 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package or
+
+import (
+ "sync"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/alg"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/pkg/errors"
+)
+
+// processor to handle the rules with OR mapping ways
+type processor struct {
+ // keep evaluator and its related selector if existing
+ // attentions here, the selectors can be empty/nil, that means match all "**"
+ evaluators map[*rule.Evaluator][]res.Selector
+ // action performer
+ performers map[string]action.Performer
+}
+
+// New processor
+func New(parameters []*alg.Parameter) alg.Processor {
+ p := &processor{
+ evaluators: make(map[*rule.Evaluator][]res.Selector),
+ performers: make(map[string]action.Performer),
+ }
+
+ if len(parameters) > 0 {
+ for _, param := range parameters {
+ if param.Evaluator != nil {
+ if len(param.Selectors) > 0 {
+ p.evaluators[&param.Evaluator] = param.Selectors
+ }
+
+ if param.Performer != nil {
+ p.performers[param.Evaluator.Action()] = param.Performer
+ }
+ }
+ }
+ }
+
+ return p
+}
+
+// Process the candidates with the rules
+func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) {
+ if len(artifacts) == 0 {
+ log.Debug("no artifacts to retention")
+ return make([]*res.Result, 0), nil
+ }
+
+ var (
+ // collect errors by wrapping
+ err error
+ // collect processed candidates
+ processedCandidates = make(map[string]cHash)
+ )
+
+ // for sync
+ type chanItem struct {
+ action string
+ processed []*res.Candidate
+ }
+
+ resChan := make(chan *chanItem, 1)
+ // handle error
+ errChan := make(chan error, 1)
+ // control chan
+ done := make(chan bool, 1)
+
+ // go routine for receiving results/error
+ go func() {
+ defer func() {
+ // done
+ done <- true
+ }()
+
+ for {
+ select {
+ case result := <-resChan:
+ if result == nil {
+ // chan is closed
+ 
return + } + + if _, ok := processedCandidates[result.action]; !ok { + processedCandidates[result.action] = make(cHash) + } + + listByAction := processedCandidates[result.action] + for _, rp := range result.processed { + // remove duplicated ones + listByAction[rp.Hash()] = rp + } + case e := <-errChan: + if err == nil { + err = errors.Wrap(e, "artifact processing error") + } else { + err = errors.Wrap(e, err.Error()) + } + } + } + }() + + wg := new(sync.WaitGroup) + wg.Add(len(p.evaluators)) + + for eva, selectors := range p.evaluators { + var evaluator = *eva + + go func(evaluator rule.Evaluator, selectors []res.Selector) { + var ( + processed []*res.Candidate + err error + ) + + defer func() { + wg.Done() + }() + + // init + // pass array copy to the selector + processed = append(processed, artifacts...) + + if len(selectors) > 0 { + // selecting artifacts one by one + // `&&` mappings + for _, s := range selectors { + if processed, err = s.Select(processed); err != nil { + errChan <- err + return + } + } + } + + if processed, err = evaluator.Process(processed); err != nil { + errChan <- err + return + } + + // Pass to the outside + resChan <- &chanItem{ + action: evaluator.Action(), + processed: processed, + } + }(evaluator, selectors) + } + + // waiting for all the rules are evaluated + wg.Wait() + // close result chan + close(resChan) + // check if the receiving loop exists + <-done + + if err != nil { + return nil, err + } + + results := make([]*res.Result, 0) + // Perform actions + for act, hash := range processedCandidates { + var attachedErr error + + cl := hash.toList() + + if pf, ok := p.performers[act]; ok { + if theRes, err := pf.Perform(cl); err != nil { + attachedErr = err + } else { + results = append(results, theRes...) 
+ } + } else { + attachedErr = errors.Errorf("no performer added for action %s in OR processor", act) + } + + if attachedErr != nil { + for _, c := range cl { + results = append(results, &res.Result{ + Target: c, + Error: attachedErr, + }) + } + } + } + + return results, nil +} + +type cHash map[string]*res.Candidate + +func (ch cHash) toList() []*res.Candidate { + l := make([]*res.Candidate, 0) + + for _, v := range ch { + l = append(l, v) + } + + return l +} diff --git a/src/pkg/retention/policy/alg/or/processor_test.go b/src/pkg/retention/policy/alg/or/processor_test.go new file mode 100644 index 000000000..d37a602c9 --- /dev/null +++ b/src/pkg/retention/policy/alg/or/processor_test.go @@ -0,0 +1,176 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package or + +import ( + "errors" + "testing" + "time" + + "github.com/goharbor/harbor/src/pkg/retention/dep" + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/alg" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule/always" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule/lastx" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// ProcessorTestSuite is suite for testing processor +type ProcessorTestSuite struct { + suite.Suite + + all []*res.Candidate + + oldClient dep.Client +} + +// TestProcessor is entrance for ProcessorTestSuite +func TestProcessor(t *testing.T) { + suite.Run(t, new(ProcessorTestSuite)) +} + +// SetupSuite ... +func (suite *ProcessorTestSuite) SetupSuite() { + suite.all = []*res.Candidate{ + { + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "latest", + PushedTime: time.Now().Unix(), + Labels: []string{"L1", "L2"}, + }, + { + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "dev", + PushedTime: time.Now().Unix(), + Labels: []string{"L3"}, + }, + } + + suite.oldClient = dep.DefaultClient + dep.DefaultClient = &fakeRetentionClient{} +} + +// TearDownSuite ... 
+func (suite *ProcessorTestSuite) TearDownSuite() { + dep.DefaultClient = suite.oldClient +} + +// TestProcess tests process method +func (suite *ProcessorTestSuite) TestProcess() { + + perf := action.NewRetainAction(suite.all, false) + + params := make([]*alg.Parameter, 0) + lastxParams := make(map[string]rule.Parameter) + lastxParams[lastx.ParameterX] = 10 + params = append(params, &alg.Parameter{ + Evaluator: lastx.New(lastxParams), + Selectors: []res.Selector{ + doublestar.New(doublestar.Matches, "*dev*"), + label.New(label.With, "L1,L2"), + }, + Performer: perf, + }) + + latestKParams := make(map[string]rule.Parameter) + latestKParams[latestps.ParameterK] = 10 + params = append(params, &alg.Parameter{ + Evaluator: latestps.New(latestKParams), + Selectors: []res.Selector{ + label.New(label.With, "L3"), + }, + Performer: perf, + }) + + p := New(params) + + results, err := p.Process(suite.all) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(results)) + assert.Condition(suite.T(), func() bool { + for _, r := range results { + if r.Error != nil { + return false + } + } + + return true + }, "no errors in the returned result list") +} + +// TestProcess2 ... 
+func (suite *ProcessorTestSuite) TestProcess2() { + perf := action.NewRetainAction(suite.all, false) + + params := make([]*alg.Parameter, 0) + alwaysParams := make(map[string]rule.Parameter) + params = append(params, &alg.Parameter{ + Evaluator: always.New(alwaysParams), + Selectors: []res.Selector{ + doublestar.New(doublestar.Matches, "latest"), + label.New(label.With, ""), + }, + Performer: perf, + }) + + p := New(params) + + results, err := p.Process(suite.all) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(results)) + assert.Condition(suite.T(), func() bool { + found := false + for _, r := range results { + if r.Error != nil { + return false + } + + if r.Target.Tag == "dev" { + found = true + } + } + + return found + }, "no errors in the returned result list") + +} + +type fakeRetentionClient struct{} + +// GetCandidates ... +func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { + return nil, errors.New("not implemented") +} + +// Delete ... +func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { + return nil +} + +// DeleteRepository ... +func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { + panic("implement me") +} diff --git a/src/pkg/retention/policy/alg/processor.go b/src/pkg/retention/policy/alg/processor.go new file mode 100644 index 000000000..4f7103a5f --- /dev/null +++ b/src/pkg/retention/policy/alg/processor.go @@ -0,0 +1,52 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package alg + +import ( + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +// Processor processing the whole policy targeting a repository. +// Methods are defined to reflect the standard structure of the policy: +// list of rules with corresponding selectors plus an action performer. +type Processor interface { + // Process the artifact candidates + // + // Arguments: + // artifacts []*res.Candidate : process the retention candidates + // + // Returns: + // []*res.Result : the processed results + // error : common error object if any errors occurred + Process(artifacts []*res.Candidate) ([]*res.Result, error) +} + +// Parameter for constructing a processor +// Represents one rule +type Parameter struct { + // Evaluator for the rule + Evaluator rule.Evaluator + + // Selectors for the rule + Selectors []res.Selector + + // Performer for the rule evaluator + Performer action.Performer +} + +// Factory for creating processor +type Factory func([]*Parameter) Processor diff --git a/src/pkg/retention/policy/builder.go b/src/pkg/retention/policy/builder.go new file mode 100644 index 000000000..88443fb6b --- /dev/null +++ b/src/pkg/retention/policy/builder.go @@ -0,0 +1,102 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package policy + +import ( + "fmt" + + index4 "github.com/goharbor/harbor/src/pkg/retention/policy/action/index" + + index3 "github.com/goharbor/harbor/src/pkg/retention/policy/alg/index" + + index2 "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule/index" + + "github.com/goharbor/harbor/src/pkg/retention/policy/alg" + "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/pkg/errors" +) + +// Builder builds the runnable processor from the raw policy +type Builder interface { + // Builds runnable processor + // + // Arguments: + // policy *Metadata : the simple metadata of retention policy + // isDryRun bool : indicate if we need to build a processor for dry run + // + // Returns: + // Processor : a processor implementation to process the candidates + // error : common error object if any errors occurred + Build(policy *lwp.Metadata, isDryRun bool) (alg.Processor, error) +} + +// NewBuilder news a basic builder +func NewBuilder(all []*res.Candidate) Builder { + return &basicBuilder{ + allCandidates: all, + } +} + +// basicBuilder is default implementation of Builder interface +type basicBuilder struct { + allCandidates []*res.Candidate +} + +// Build policy processor from the raw policy +func (bb *basicBuilder) Build(policy *lwp.Metadata, isDryRun bool) (alg.Processor, error) { + if policy == nil { + return nil, errors.New("nil policy to build processor") + } + + params := make([]*alg.Parameter, 0) + + for _, r := range policy.Rules { + evaluator, err := index.Get(r.Template, r.Parameters) + if err != nil { + return nil, err + } + + perf, err := index4.Get(r.Action, bb.allCandidates, isDryRun) + if err != nil { + return nil, errors.Wrap(err, "get action performer by metadata") + } + + sl := make([]res.Selector, 
0) + for _, s := range r.TagSelectors { + sel, err := index2.Get(s.Kind, s.Decoration, s.Pattern) + if err != nil { + return nil, errors.Wrap(err, "get selector by metadata") + } + + sl = append(sl, sel) + } + + params = append(params, &alg.Parameter{ + Evaluator: evaluator, + Selectors: sl, + Performer: perf, + }) + } + + p, err := index3.Get(policy.Algorithm, params) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("get processor for algorithm: %s", policy.Algorithm)) + } + + return p, nil +} diff --git a/src/pkg/retention/policy/builder_test.go b/src/pkg/retention/policy/builder_test.go new file mode 100644 index 000000000..fb1f4271a --- /dev/null +++ b/src/pkg/retention/policy/builder_test.go @@ -0,0 +1,181 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package policy + +import ( + "testing" + "time" + + index3 "github.com/goharbor/harbor/src/pkg/retention/policy/action/index" + + index2 "github.com/goharbor/harbor/src/pkg/retention/policy/alg/index" + + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index" + + "github.com/goharbor/harbor/src/pkg/retention/dep" + + "github.com/pkg/errors" + + "github.com/goharbor/harbor/src/pkg/retention/policy/alg/or" + + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label" + + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps" + + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + + "github.com/goharbor/harbor/src/pkg/retention/policy/lwp" + + "github.com/goharbor/harbor/src/pkg/retention/res" + + "github.com/stretchr/testify/suite" +) + +// TestBuilderSuite is the suite to test builder +type TestBuilderSuite struct { + suite.Suite + + all []*res.Candidate + oldClient dep.Client +} + +// TestBuilder is the entry of testing TestBuilderSuite +func TestBuilder(t *testing.T) { + suite.Run(t, new(TestBuilderSuite)) +} + +// SetupSuite prepares the testing content if needed +func (suite *TestBuilderSuite) SetupSuite() { + suite.all = []*res.Candidate{ + { + NamespaceID: 1, + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "latest", + PushedTime: time.Now().Unix(), + Labels: []string{"L1", "L2"}, + }, + { + NamespaceID: 1, + Namespace: "library", + Repository: "harbor", + Kind: "image", + Tag: "dev", + PushedTime: time.Now().Unix(), + Labels: []string{"L3"}, + }, + } + + index2.Register(index2.AlgorithmOR, or.New) + index.Register(doublestar.Kind, []string{ + doublestar.Matches, + doublestar.Excludes, + doublestar.RepoMatches, + doublestar.RepoExcludes, + doublestar.NSMatches, + 
doublestar.NSExcludes, + }, doublestar.New) + index.Register(label.Kind, []string{label.With, label.Without}, label.New) + index3.Register(action.Retain, action.NewRetainAction) + + suite.oldClient = dep.DefaultClient + dep.DefaultClient = &fakeRetentionClient{} +} + +// TearDownSuite ... +func (suite *TestBuilderSuite) TearDownSuite() { + dep.DefaultClient = suite.oldClient +} + +// TestBuild tests the Build function +func (suite *TestBuilderSuite) TestBuild() { + b := &basicBuilder{suite.all} + + params := make(rule.Parameters) + params[latestps.ParameterK] = 10 + + scopeSelectors := make(map[string][]*rule.Selector, 1) + scopeSelectors["repository"] = []*rule.Selector{{ + Kind: doublestar.Kind, + Decoration: doublestar.RepoMatches, + Pattern: "**", + }} + + lm := &lwp.Metadata{ + Algorithm: AlgorithmOR, + Rules: []*rule.Metadata{{ + ID: 1, + Priority: 999, + Action: action.Retain, + Template: latestps.TemplateID, + Parameters: params, + ScopeSelectors: scopeSelectors, + TagSelectors: []*rule.Selector{ + { + Kind: doublestar.Kind, + Decoration: doublestar.Matches, + Pattern: "latest", + }, + }, + }}, + } + + p, err := b.Build(lm, false) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), p) + + results, err := p.Process(suite.all) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(results)) + assert.Condition(suite.T(), func() (success bool) { + art := results[0] + success = art.Error == nil && + art.Target != nil && + art.Target.Repository == "harbor" && + art.Target.Tag == "dev" + + return + }) +} + +type fakeRetentionClient struct{} + +func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error { + panic("implement me") +} + +// GetCandidates ... +func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) { + return nil, errors.New("not implemented") +} + +// Delete ... +func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error { + return nil +} + +// SubmitTask ... 
+func (frc *fakeRetentionClient) SubmitTask(taskID int64, repository *res.Repository, meta *lwp.Metadata) (string, error) { + return "", errors.New("not implemented") +} diff --git a/src/pkg/retention/policy/lwp/models.go b/src/pkg/retention/policy/lwp/models.go new file mode 100644 index 000000000..61d48efe5 --- /dev/null +++ b/src/pkg/retention/policy/lwp/models.go @@ -0,0 +1,53 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package lwp = lightweight policy +package lwp + +import ( + "encoding/json" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/pkg/errors" +) + +// Metadata contains partial metadata of policy +// It's a lightweight version of policy.Metadata +type Metadata struct { + // Algorithm applied to the rules + // "OR" / "AND" + Algorithm string `json:"algorithm"` + + // Rule collection + Rules []*rule.Metadata `json:"rules"` +} + +// ToJSON marshals metadata to JSON string +func (m *Metadata) ToJSON() (string, error) { + jsonData, err := json.Marshal(m) + if err != nil { + return "", errors.Wrap(err, "marshal repository") + } + + return string(jsonData), nil +} + +// FromJSON constructs the metadata from json data +func (m *Metadata) FromJSON(jsonData string) error { + if len(jsonData) == 0 { + return errors.New("empty json data to construct repository") + } + + return json.Unmarshal([]byte(jsonData), m) +} diff --git a/src/pkg/retention/policy/models.go 
package policy

import (
	"github.com/astaxie/beego/validation"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
)

const (
	// AlgorithmOR for OR algorithm
	AlgorithmOR = "or"

	// TriggerKindSchedule Schedule
	TriggerKindSchedule = "Schedule"

	// TriggerReferencesJobid job_id
	TriggerReferencesJobid = "job_id"
	// TriggerSettingsCron cron
	TriggerSettingsCron = "cron"

	// ScopeLevelProject project
	ScopeLevelProject = "project"
)

// Metadata of policy
type Metadata struct {
	// ID of the policy
	ID int64 `json:"id"`

	// Algorithm applied to the rules
	// "OR" / "AND"
	// NOTE(review): the valid tag `Match(or)` is unanchored, unlike
	// Scope.Level's `/^(project)$/` below — presumably any value merely
	// containing "or" would pass; confirm against beego's Match semantics.
	Algorithm string `json:"algorithm" valid:"Required;Match(or)"`

	// Rule collection
	Rules []rule.Metadata `json:"rules"`

	// Trigger about how to launch the policy
	Trigger *Trigger `json:"trigger" valid:"Required"`

	// Which scope the policy will be applied to
	Scope *Scope `json:"scope" valid:"Required"`

	// The max number of rules in a policy
	Capacity int `json:"cap"`
}

// Valid implements the beego validation hook for Metadata: beyond the
// struct tags, a "Schedule" trigger must carry Settings with a "cron" key.
func (m *Metadata) Valid(v *validation.Validation) {
	if m.Trigger != nil && m.Trigger.Kind == TriggerKindSchedule {
		if m.Trigger.Settings == nil {
			// SetError records the failure on the validation object;
			// the returned *Error is intentionally discarded.
			_ = v.SetError("Trigger.Settings", "Trigger.Settings is required")
		} else {
			if _, ok := m.Trigger.Settings[TriggerSettingsCron]; !ok {
				_ = v.SetError("Trigger.Settings", "cron in Trigger.Settings is required")
			}
		}

	}
}

// Trigger of the policy
type Trigger struct {
	// Const string to declare the trigger type
	// 'Schedule'
	Kind string `json:"kind" valid:"Required"`

	// Settings for the specified trigger
	// '[cron]="* 22 11 * * *"' for the 'Schedule'
	Settings map[string]interface{} `json:"settings" valid:"Required"`

	// References of the trigger
	// e.g: schedule job ID
	References map[string]interface{} `json:"references"`
}

// Scope definition
type Scope struct {
	// Scope level declaration
	// 'system', 'project' and 'repository'
	// (the valid tag currently only accepts 'project', matching ScopeLevelProject)
	Level string `json:"level" valid:"Required;Match(/^(project)$/)"`

	// The reference identity for the specified level
	// 0 for 'system', project ID for 'project' and repo ID for 'repository'
	// NOTE(review): `Required` rejects 0 — consistent with only 'project'
	// being allowed today, but would need revisiting for 'system' scope.
	Reference int64 `json:"ref" valid:"Required"`
}
+ +package always + +import ( + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // TemplateID of the always retain rule + TemplateID = "always" +) + +type evaluator struct{} + +// Process for the "always" Evaluator simply returns the input with no error +func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { + return artifacts, nil +} + +func (e *evaluator) Action() string { + return action.Retain +} + +// New returns an "always" Evaluator. It requires no parameters. +func New(_ rule.Parameters) rule.Evaluator { + return &evaluator{} +} diff --git a/src/pkg/retention/policy/rule/always/evaluator_test.go b/src/pkg/retention/policy/rule/always/evaluator_test.go new file mode 100644 index 000000000..9e7c53b77 --- /dev/null +++ b/src/pkg/retention/policy/rule/always/evaluator_test.go @@ -0,0 +1,49 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package always + +import ( + "testing" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type EvaluatorTestSuite struct { + suite.Suite +} + +func (e *EvaluatorTestSuite) TestNew() { + sut := New(rule.Parameters{}) + + require.NotNil(e.T(), sut) + require.IsType(e.T(), &evaluator{}, sut) +} + +func (e *EvaluatorTestSuite) TestProcess() { + sut := New(rule.Parameters{}) + input := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}} + + result, err := sut.Process(input) + + require.NoError(e.T(), err) + require.Len(e.T(), result, len(input)) +} + +func TestEvaluatorSuite(t *testing.T) { + suite.Run(t, &EvaluatorTestSuite{}) +} diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator.go b/src/pkg/retention/policy/rule/dayspl/evaluator.go new file mode 100644 index 000000000..c0fd76256 --- /dev/null +++ b/src/pkg/retention/policy/rule/dayspl/evaluator.go @@ -0,0 +1,70 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dayspl + +import ( + "time" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // TemplateID of the rule + TemplateID = "nDaysSinceLastPull" + + // ParameterN is the name of the metadata parameter for the N value + ParameterN = TemplateID + + // DefaultN is the default number of days that an artifact must have + // been pulled within to retain the tag or artifact. + DefaultN = 30 +) + +type evaluator struct { + n int +} + +func (e *evaluator) Process(artifacts []*res.Candidate) (result []*res.Candidate, err error) { + minPullTime := time.Now().UTC().Add(time.Duration(-1*24*e.n) * time.Hour).Unix() + for _, a := range artifacts { + if a.PulledTime >= minPullTime { + result = append(result, a) + } + } + + return +} + +func (e *evaluator) Action() string { + return action.Retain +} + +// New constructs a new 'Days Since Last Pull' evaluator +func New(params rule.Parameters) rule.Evaluator { + if params != nil { + if p, ok := params[ParameterN]; ok { + if v, ok := p.(float64); ok && v >= 0 { + return &evaluator{n: int(v)} + } + } + } + + log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID) + + return &evaluator{n: DefaultN} +} diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator_test.go b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go new file mode 100644 index 000000000..a8587ccd8 --- /dev/null +++ b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go @@ -0,0 +1,104 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
package dayspl

import (
	"fmt"
	"testing"
	"time"

	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
	"github.com/goharbor/harbor/src/pkg/retention/res"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// EvaluatorTestSuite covers the factory and filtering behavior of the
// 'Days Since Last Pull' evaluator.
type EvaluatorTestSuite struct {
	suite.Suite
}

// TestNew checks parameter handling: a valid value is used as-is, while
// negative, missing, or wrongly-typed values fall back to DefaultN.
func (e *EvaluatorTestSuite) TestNew() {
	tests := []struct {
		Name      string
		args      rule.Parameters
		expectedN int
	}{
		{Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5},
		{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN},
		{Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN},
		{Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN},
	}

	for _, tt := range tests {
		e.T().Run(tt.Name, func(t *testing.T) {
			// New always returns *evaluator, so this assertion is safe.
			e := New(tt.args).(*evaluator)

			require.Equal(t, tt.expectedN, e.n)
		})
	}
}

// TestProcess builds candidates pulled 1..30 days ago and checks each
// window keeps exactly the expected number, none older than minPullTime.
func (e *EvaluatorTestSuite) TestProcess() {
	now := time.Now().UTC()
	data := []*res.Candidate{
		{PulledTime: daysAgo(now, 1)},
		{PulledTime: daysAgo(now, 2)},
		{PulledTime: daysAgo(now, 3)},
		{PulledTime: daysAgo(now, 4)},
		{PulledTime: daysAgo(now, 5)},
		{PulledTime: daysAgo(now, 10)},
		{PulledTime: daysAgo(now, 20)},
		{PulledTime: daysAgo(now, 30)},
	}

	tests := []struct {
		n           float64
		expected    int
		minPullTime int64
	}{
		{n: 0, expected: 0, minPullTime: 0},
		{n: 1, expected: 1, minPullTime: daysAgo(now, 1)},
		{n: 2, expected: 2, minPullTime: daysAgo(now, 2)},
		{n: 3, expected: 3, minPullTime: daysAgo(now, 3)},
		{n: 4, expected: 4, minPullTime: daysAgo(now, 4)},
		{n: 5, expected: 5, minPullTime: daysAgo(now, 5)},
		{n: 15, expected: 6, minPullTime: daysAgo(now, 10)},
		{n: 90, expected: 8, minPullTime: daysAgo(now, 30)},
	}

	for _, tt := range tests {
		e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
			sut := New(map[string]rule.Parameter{ParameterN: tt.n})

			result, err := sut.Process(data)

			require.NoError(t, err)
			require.Len(t, result, tt.expected)

			// No retained candidate may be older than the window floor.
			for _, v := range result {
				assert.False(t, v.PulledTime < tt.minPullTime)
			}
		})
	}
}

// TestEvaluatorSuite is the suite entry point.
func TestEvaluatorSuite(t *testing.T) {
	suite.Run(t, &EvaluatorTestSuite{})
}

// daysAgo returns the Unix timestamp n whole days before 'from'.
func daysAgo(from time.Time, n int) int64 {
	return from.Add(time.Duration(-1*24*n) * time.Hour).Unix()
}
package daysps

import (
	"time"

	"github.com/goharbor/harbor/src/common/utils/log"
	"github.com/goharbor/harbor/src/pkg/retention/policy/action"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
	"github.com/goharbor/harbor/src/pkg/retention/res"
)

const (
	// TemplateID of the rule
	TemplateID = "nDaysSinceLastPush"

	// ParameterN is the name of the metadata parameter for the N value
	ParameterN = TemplateID

	// DefaultN is the default number of days that an artifact must have
	// been pushed within to retain the tag or artifact.
	// (comment previously said "pulled" — a copy-paste from the dayspl
	// rule; this rule filters on PushedTime, see Process below)
	DefaultN = 30
)

// evaluator keeps candidates pushed within the last n days.
// It mirrors the dayspl evaluator but is keyed off PushedTime.
type evaluator struct {
	n int
}

// Process keeps only the candidates whose PushedTime falls within the
// last n days, counted back from now (UTC).
func (e *evaluator) Process(artifacts []*res.Candidate) (result []*res.Candidate, err error) {
	minPushTime := time.Now().UTC().Add(time.Duration(-1*24*e.n) * time.Hour).Unix()
	for _, a := range artifacts {
		if a.PushedTime >= minPushTime {
			result = append(result, a)
		}
	}

	return
}

// Action reports the action performed on the kept candidates.
func (e *evaluator) Action() string {
	return action.Retain
}

// New constructs a new 'Days Since Last Push' evaluator.
// A missing, negative, or non-float64 parameter falls back to DefaultN.
func New(params rule.Parameters) rule.Evaluator {
	if params != nil {
		if p, ok := params[ParameterN]; ok {
			if v, ok := p.(float64); ok && v >= 0 {
				return &evaluator{n: int(v)}
			}
		}
	}

	log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID)

	return &evaluator{n: DefaultN}
}
package daysps

import (
	"fmt"
	"testing"
	"time"

	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
	"github.com/goharbor/harbor/src/pkg/retention/res"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// EvaluatorTestSuite covers the factory and filtering behavior of the
// 'Days Since Last Push' evaluator.
type EvaluatorTestSuite struct {
	suite.Suite
}

// TestNew checks parameter handling: a valid value is used as-is, while
// negative, missing, or wrongly-typed values fall back to DefaultN.
func (e *EvaluatorTestSuite) TestNew() {
	tests := []struct {
		Name      string
		args      rule.Parameters
		expectedN int
	}{
		{Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5},
		{Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN},
		{Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN},
		{Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN},
	}

	for _, tt := range tests {
		e.T().Run(tt.Name, func(t *testing.T) {
			// New always returns *evaluator, so this assertion is safe.
			e := New(tt.args).(*evaluator)

			require.Equal(t, tt.expectedN, e.n)
		})
	}
}

// TestProcess builds candidates pushed 1..30 days ago and checks each
// window keeps exactly the expected number, none older than minPushTime.
func (e *EvaluatorTestSuite) TestProcess() {
	now := time.Now().UTC()
	data := []*res.Candidate{
		{PushedTime: daysAgo(now, 1)},
		{PushedTime: daysAgo(now, 2)},
		{PushedTime: daysAgo(now, 3)},
		{PushedTime: daysAgo(now, 4)},
		{PushedTime: daysAgo(now, 5)},
		{PushedTime: daysAgo(now, 10)},
		{PushedTime: daysAgo(now, 20)},
		{PushedTime: daysAgo(now, 30)},
	}

	tests := []struct {
		n           float64
		expected    int
		minPushTime int64
	}{
		{n: 0, expected: 0, minPushTime: 0},
		{n: 1, expected: 1, minPushTime: daysAgo(now, 1)},
		{n: 2, expected: 2, minPushTime: daysAgo(now, 2)},
		{n: 3, expected: 3, minPushTime: daysAgo(now, 3)},
		{n: 4, expected: 4, minPushTime: daysAgo(now, 4)},
		{n: 5, expected: 5, minPushTime: daysAgo(now, 5)},
		{n: 15, expected: 6, minPushTime: daysAgo(now, 10)},
		{n: 90, expected: 8, minPushTime: daysAgo(now, 30)},
	}

	for _, tt := range tests {
		e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
			sut := New(map[string]rule.Parameter{ParameterN: tt.n})

			result, err := sut.Process(data)

			require.NoError(t, err)
			require.Len(t, result, tt.expected)

			// No retained candidate may be older than the window floor.
			for _, v := range result {
				assert.False(t, v.PushedTime < tt.minPushTime)
			}
		})
	}
}

// TestEvaluatorSuite is the suite entry point.
func TestEvaluatorSuite(t *testing.T) {
	suite.Run(t, &EvaluatorTestSuite{})
}

// daysAgo returns the Unix timestamp n whole days before 'from'.
func daysAgo(from time.Time, n int) int64 {
	return from.Add(time.Duration(-1*24*n) * time.Hour).Unix()
}
package rule

import "github.com/goharbor/harbor/src/pkg/retention/res"

// Evaluator defines method of executing rule
type Evaluator interface {
	// Filter the inputs and return the filtered outputs
	//
	// Arguments:
	//   artifacts []*res.Candidate : candidates for processing
	//
	// Returns:
	//   []*res.Candidate : matched candidates for next stage
	//   error            : common error object if any errors occurred
	Process(artifacts []*res.Candidate) ([]*res.Candidate, error)

	// Specify what action is performed to the candidates processed by this evaluator
	// (e.g. "retain"; see the action package for the known values)
	Action() string
}

// Factory defines a factory method for creating rule evaluator.
// Implementations are registered against a template ID in the rule index
// (see rule/index) and invoked with the user-supplied parameters.
type Factory func(parameters Parameters) Evaluator
package index

import (
	"sync"

	"github.com/goharbor/harbor/src/pkg/retention/policy/action"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/always"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/dayspl"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/daysps"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/lastx"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestk"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestpl"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/nothing"
	"github.com/pkg/errors"
)

// index for keeping the mapping between template ID and evaluator
// (values are *indexedItem; written by Register, read by Get and Index)
var index sync.Map

// Metadata defines metadata for rule registration
type Metadata struct {
	// TemplateID identifies the rule template, e.g. "lastXDays"
	TemplateID string `json:"rule_template"`

	// Action of the rule performs
	// "retain"
	Action string `json:"action"`

	// Parameters the template accepts; Get enforces the Required ones
	Parameters []*IndexedParam `json:"params"`
}

// IndexedParam declares the param info
type IndexedParam struct {
	Name string `json:"name"`

	// Type of the param
	// "int", "string" or "[]string"
	Type string `json:"type"`

	// Unit of the param, e.g. "count" or "days"
	Unit string `json:"unit"`

	// Required params must be present in the map passed to Get
	Required bool `json:"required"`
}

// indexedItem is the item saved in the sync map
type indexedItem struct {
	Meta *Metadata

	Factory rule.Factory
}

// init registers the eight built-in rule templates. Anything registered
// here (plus test registrations) is what Index() reports.
func init() {
	// Register latest pushed
	Register(&Metadata{
		TemplateID: latestps.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{
			{
				Name:     latestps.ParameterK,
				Type:     "int",
				Unit:     "count",
				Required: true,
			},
		},
	}, latestps.New)

	// Register latest pulled
	Register(&Metadata{
		TemplateID: latestpl.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{
			{
				Name:     latestpl.ParameterN,
				Type:     "int",
				Unit:     "count",
				Required: true,
			},
		},
	}, latestpl.New)

	// Register latest active
	Register(&Metadata{
		TemplateID: latestk.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{
			{
				Name:     latestk.ParameterK,
				Type:     "int",
				Unit:     "count",
				Required: true,
			},
		},
	}, latestk.New)

	// Register lastx
	Register(&Metadata{
		TemplateID: lastx.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{
			{
				Name:     lastx.ParameterX,
				Type:     "int",
				Unit:     "days",
				Required: true,
			},
		},
	}, lastx.New)

	// Register nothing
	Register(&Metadata{
		TemplateID: nothing.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{},
	}, nothing.New)

	// Register always
	Register(&Metadata{
		TemplateID: always.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{},
	}, always.New)

	// Register dayspl
	Register(&Metadata{
		TemplateID: dayspl.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{
			{
				Name:     dayspl.ParameterN,
				Type:     "int",
				Unit:     "days",
				Required: true,
			},
		},
	}, dayspl.New)

	// Register daysps
	Register(&Metadata{
		TemplateID: daysps.TemplateID,
		Action:     action.Retain,
		Parameters: []*IndexedParam{
			{
				Name:     daysps.ParameterN,
				Type:     "int",
				Unit:     "days",
				Required: true,
			},
		},
	}, daysps.New)
}

// Register the rule evaluator with the corresponding rule template.
// Incomplete registrations (nil meta/factory, empty template ID) are
// silently ignored; a duplicate template ID overwrites the prior entry.
func Register(meta *Metadata, factory rule.Factory) {
	if meta == nil || factory == nil || len(meta.TemplateID) == 0 {
		// do nothing
		return
	}

	index.Store(meta.TemplateID, &indexedItem{
		Meta: meta,
		Factory: factory,
	})
}

// Get rule evaluator with the provided template ID.
// Only the *presence* of required parameters is checked here; type
// validation is left to the individual factories.
func Get(templateID string, parameters rule.Parameters) (rule.Evaluator, error) {
	if len(templateID) == 0 {
		return nil, errors.New("empty rule template ID")
	}

	v, ok := index.Load(templateID)
	if !ok {
		return nil, errors.Errorf("rule evaluator %s is not registered", templateID)
	}

	item := v.(*indexedItem)

	// We can check more things if we want to do in the future
	if len(item.Meta.Parameters) > 0 {
		for _, p := range item.Meta.Parameters {
			if p.Required {
				exists := parameters != nil
				if exists {
					_, exists = parameters[p.Name]
				}

				if !exists {
					return nil, errors.Errorf("missing required parameter %s for rule %s", p.Name, templateID)
				}
			}
		}
	}
	factory := item.Factory

	return factory(parameters), nil
}

// Index returns all the metadata of the registered rules
func Index() []*Metadata {
	res := make([]*Metadata, 0)

	index.Range(func(k, v interface{}) bool {
		if item, ok := v.(*indexedItem); ok {
			res = append(res, item.Meta)
			return true
		}

		// NOTE(review): returning false stops Range entirely, so a single
		// non-*indexedItem entry would truncate the listing. Register only
		// ever stores *indexedItem, so this looks unreachable — confirm no
		// other writer touches the map.
		return false
	})

	return res
}
package index

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/stretchr/testify/require"

	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
	"github.com/goharbor/harbor/src/pkg/retention/res"

	"github.com/stretchr/testify/suite"
)

// IndexTestSuite tests the rule index
type IndexTestSuite struct {
	suite.Suite
}

// TestIndexEntry is entry of IndexTestSuite
func TestIndexEntry(t *testing.T) {
	suite.Run(t, new(IndexTestSuite))
}

// SetupSuite registers a fake evaluator on top of the built-in rules so
// Get/Index can be exercised against a known template ID.
func (suite *IndexTestSuite) SetupSuite() {
	Register(&Metadata{
		TemplateID: "fakeEvaluator",
		Action:     "retain",
		Parameters: []*IndexedParam{
			{
				Name:     "fakeParam",
				Type:     "int",
				Unit:     "count",
				Required: true,
			},
		},
	}, newFakeEvaluator)
}

// TestGet tests Get: the fake registration from SetupSuite is looked up,
// instantiated with its required parameter, and processes candidates.
func (suite *IndexTestSuite) TestGet() {

	params := make(rule.Parameters)
	params["fakeParam"] = 99
	evaluator, err := Get("fakeEvaluator", params)
	require.NoError(suite.T(), err)
	require.NotNil(suite.T(), evaluator)

	candidates := []*res.Candidate{{
		Namespace:  "library",
		Repository: "harbor",
		Kind:       "image",
		Tag:        "latest",
		PushedTime: time.Now().Unix(),
		Labels:     []string{"L1", "L2"},
	}}

	results, err := evaluator.Process(candidates)
	require.NoError(suite.T(), err)
	assert.Equal(suite.T(), 1, len(results))
	assert.Condition(suite.T(), func() bool {
		c := results[0]
		return c.Repository == "harbor" && c.Tag == "latest"
	})
}

// TestIndex tests Index: 8 built-in rules + the fake one = 9 entries.
func (suite *IndexTestSuite) TestIndex() {
	metas := Index()
	require.Equal(suite.T(), 9, len(metas))
	assert.Condition(suite.T(), func() bool {
		for _, m := range metas {
			if m.TemplateID == "fakeEvaluator" &&
				m.Action == "retain" &&
				len(m.Parameters) > 0 {
				return true
			}
		}
		return false
	}, "check fake evaluator in index")
}

// fakeEvaluator is a pass-through evaluator used only by this suite.
type fakeEvaluator struct {
	i int
}

// Process rule: returns its input unchanged.
func (e *fakeEvaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) {
	return artifacts, nil
}

// Action of the rule
func (e *fakeEvaluator) Action() string {
	return "retain"
}

// newFakeEvaluator is the factory of fakeEvaluator.
// NOTE(review): the v.(int) assertion panics if "fakeParam" is present
// with a non-int value — acceptable in test code, the suite always
// passes an int.
func newFakeEvaluator(parameters rule.Parameters) rule.Evaluator {
	i := 10
	if v, ok := parameters["fakeParam"]; ok {
		i = v.(int)
	}

	return &fakeEvaluator{i}
}
+ ParameterX = TemplateID + // DefaultX defines the default X + DefaultX = 10 +) + +// evaluator for evaluating last x days +type evaluator struct { + // last x days + x int +} + +// Process the candidates based on the rule definition +func (e *evaluator) Process(artifacts []*res.Candidate) (retain []*res.Candidate, err error) { + cutoff := time.Now().Add(time.Duration(e.x*-24) * time.Hour) + for _, a := range artifacts { + if time.Unix(a.PushedTime, 0).UTC().After(cutoff) { + retain = append(retain, a) + } + } + + return +} + +// Specify what action is performed to the candidates processed by this evaluator +func (e *evaluator) Action() string { + return action.Retain +} + +// New a Evaluator +func New(params rule.Parameters) rule.Evaluator { + if params != nil { + if param, ok := params[ParameterX]; ok { + if v, ok := param.(float64); ok && v >= 0 { + return &evaluator{ + x: int(v), + } + } + } + } + + log.Warningf("default parameter %d used for rule %s", DefaultX, TemplateID) + + return &evaluator{ + x: DefaultX, + } +} diff --git a/src/pkg/retention/policy/rule/lastx/evaluator_test.go b/src/pkg/retention/policy/rule/lastx/evaluator_test.go new file mode 100644 index 000000000..becd79234 --- /dev/null +++ b/src/pkg/retention/policy/rule/lastx/evaluator_test.go @@ -0,0 +1,78 @@ +package lastx + +import ( + "fmt" + "testing" + "time" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type EvaluatorTestSuite struct { + suite.Suite +} + +func (e *EvaluatorTestSuite) TestNew() { + tests := []struct { + Name string + args rule.Parameters + expectedX int + }{ + {Name: "Valid", args: map[string]rule.Parameter{ParameterX: float64(3)}, expectedX: 3}, + {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterX: float64(-3)}, expectedX: DefaultX}, + {Name: "Default If Not Set", args: map[string]rule.Parameter{}, 
expectedX: DefaultX}, + {Name: "Default If Wrong Type", args: map[string]rule.Parameter{}, expectedX: DefaultX}, + } + + for _, tt := range tests { + e.T().Run(tt.Name, func(t *testing.T) { + e := New(tt.args).(*evaluator) + + require.Equal(t, tt.expectedX, e.x) + }) + } +} + +func (e *EvaluatorTestSuite) TestProcess() { + now := time.Now().UTC() + data := []*res.Candidate{ + {PushedTime: now.Add(time.Duration(1*-24) * time.Hour).Unix()}, + {PushedTime: now.Add(time.Duration(2*-24) * time.Hour).Unix()}, + {PushedTime: now.Add(time.Duration(3*-24) * time.Hour).Unix()}, + {PushedTime: now.Add(time.Duration(4*-24) * time.Hour).Unix()}, + {PushedTime: now.Add(time.Duration(5*-24) * time.Hour).Unix()}, + {PushedTime: now.Add(time.Duration(99*-24) * time.Hour).Unix()}, + } + + tests := []struct { + days float64 + expected int + }{ + {days: 0, expected: 0}, + {days: 1, expected: 0}, + {days: 2, expected: 1}, + {days: 3, expected: 2}, + {days: 4, expected: 3}, + {days: 5, expected: 4}, + {days: 6, expected: 5}, + {days: 7, expected: 5}, + } + + for _, tt := range tests { + e.T().Run(fmt.Sprintf("%v days - should keep %d", tt.days, tt.expected), func(t *testing.T) { + e := New(rule.Parameters{ParameterX: tt.days}) + + result, err := e.Process(data) + + require.NoError(t, err) + require.Len(t, result, tt.expected) + }) + } +} + +func TestEvaluatorSuite(t *testing.T) { + suite.Run(t, &EvaluatorTestSuite{}) +} diff --git a/src/pkg/retention/policy/rule/latestk/evaluator.go b/src/pkg/retention/policy/rule/latestk/evaluator.go new file mode 100644 index 000000000..f6d73599a --- /dev/null +++ b/src/pkg/retention/policy/rule/latestk/evaluator.go @@ -0,0 +1,89 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package latestk + +import ( + "sort" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // TemplateID of latest active k rule + TemplateID = "latestActiveK" + // ParameterK ... + ParameterK = TemplateID + // DefaultK defines the default K + DefaultK = 10 +) + +// evaluator for evaluating latest active k images +type evaluator struct { + // latest k + k int +} + +// Process the candidates based on the rule definition +func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { + // Sort artifacts by their "active time" + // + // Active time is defined as the selection of c.PulledTime or c.PushedTime, + // whichever is bigger, aka more recent. 
+ sort.Slice(artifacts, func(i, j int) bool { + return activeTime(artifacts[i]) > activeTime(artifacts[j]) + }) + + i := e.k + if i > len(artifacts) { + i = len(artifacts) + } + + return artifacts[:i], nil +} + +// Specify what action is performed to the candidates processed by this evaluator +func (e *evaluator) Action() string { + return action.Retain +} + +// New a Evaluator +func New(params rule.Parameters) rule.Evaluator { + if params != nil { + if param, ok := params[ParameterK]; ok { + if v, ok := param.(float64); ok && v >= 0 { + return &evaluator{ + k: int(v), + } + } + } + } + + log.Debugf("default parameter %d used for rule %s", DefaultK, TemplateID) + + return &evaluator{ + k: DefaultK, + } +} + +func activeTime(c *res.Candidate) int64 { + if c.PulledTime > c.PushedTime { + return c.PulledTime + } + + return c.PushedTime +} diff --git a/src/pkg/retention/policy/rule/latestk/evaluator_test.go b/src/pkg/retention/policy/rule/latestk/evaluator_test.go new file mode 100644 index 000000000..24b04fb9e --- /dev/null +++ b/src/pkg/retention/policy/rule/latestk/evaluator_test.go @@ -0,0 +1,99 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package latestk + +import ( + "fmt" + "testing" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/suite" +) + +type EvaluatorTestSuite struct { + suite.Suite + + artifacts []*res.Candidate +} + +func (e *EvaluatorTestSuite) SetupSuite() { + e.artifacts = []*res.Candidate{ + {PulledTime: 1, PushedTime: 2}, + {PulledTime: 3, PushedTime: 4}, + {PulledTime: 6, PushedTime: 5}, + {PulledTime: 8, PushedTime: 7}, + {PulledTime: 9, PushedTime: 9}, + {PulledTime: 10, PushedTime: 10}, + {PulledTime: 0, PushedTime: 11}, + } +} + +func (e *EvaluatorTestSuite) TestProcess() { + tests := []struct { + k int + expected int + minActiveTime int64 + }{ + {k: 0, expected: 0}, + {k: 1, expected: 1, minActiveTime: 11}, + {k: 2, expected: 2, minActiveTime: 10}, + {k: 5, expected: 5, minActiveTime: 6}, + {k: 6, expected: 6, minActiveTime: 3}, + {k: 99, expected: len(e.artifacts)}, + } + for _, tt := range tests { + e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) { + sut := &evaluator{k: tt.k} + + result, err := sut.Process(e.artifacts) + + require.NoError(t, err) + require.Len(t, result, tt.expected) + + for _, v := range result { + assert.True(t, activeTime(v) >= tt.minActiveTime) + } + }) + } +} + +func (e *EvaluatorTestSuite) TestNew() { + tests := []struct { + name string + params rule.Parameters + expectedK int + }{ + {name: "Valid", params: rule.Parameters{ParameterK: float64(5)}, expectedK: 5}, + {name: "Default If Negative", params: rule.Parameters{ParameterK: float64(-5)}, expectedK: DefaultK}, + {name: "Default If Wrong Type", params: rule.Parameters{ParameterK: "5"}, expectedK: DefaultK}, + {name: "Default If Wrong Key", params: rule.Parameters{"n": 5}, expectedK: DefaultK}, + {name: "Default If Empty", params: rule.Parameters{}, expectedK: DefaultK}, + } + for _, tt := range tests { + 
e.T().Run(tt.name, func(t *testing.T) { + sut := New(tt.params).(*evaluator) + + require.Equal(t, tt.expectedK, sut.k) + }) + } +} + +func TestEvaluatorSuite(t *testing.T) { + suite.Run(t, &EvaluatorTestSuite{}) +} diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator.go b/src/pkg/retention/policy/rule/latestpl/evaluator.go new file mode 100644 index 000000000..bed7b6e4e --- /dev/null +++ b/src/pkg/retention/policy/rule/latestpl/evaluator.go @@ -0,0 +1,71 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package latestpl + +import ( + "sort" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // TemplateID of the rule + TemplateID = "latestPulledN" + + // ParameterN is the name of the metadata parameter for the N value + ParameterN = TemplateID + + // DefaultN is the default number of tags to retain + DefaultN = 10 +) + +type evaluator struct { + n int +} + +func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) { + sort.Slice(artifacts, func(i, j int) bool { + return artifacts[i].PulledTime > artifacts[j].PulledTime + }) + + i := e.n + if i > len(artifacts) { + i = len(artifacts) + } + + return artifacts[:i], nil +} + +func (e *evaluator) Action() string { + return action.Retain +} + +// New constructs an evaluator with the given parameters +func New(params rule.Parameters) rule.Evaluator { + if params != nil { + if p, ok := params[ParameterN]; ok { + if v, ok := p.(float64); ok && v >= 0 { + return &evaluator{n: int(v)} + } + } + } + + log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID) + + return &evaluator{n: DefaultN} +} diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator_test.go b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go new file mode 100644 index 000000000..69b0605f5 --- /dev/null +++ b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go @@ -0,0 +1,89 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package latestpl + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type EvaluatorTestSuite struct { + suite.Suite +} + +func (e *EvaluatorTestSuite) TestNew() { + tests := []struct { + Name string + args rule.Parameters + expectedK int + }{ + {Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedK: 5}, + {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedK: DefaultN}, + {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultN}, + {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedK: DefaultN}, + } + + for _, tt := range tests { + e.T().Run(tt.Name, func(t *testing.T) { + e := New(tt.args).(*evaluator) + + require.Equal(t, tt.expectedK, e.n) + }) + } +} + +func (e *EvaluatorTestSuite) TestProcess() { + data := []*res.Candidate{{PulledTime: 0}, {PulledTime: 1}, {PulledTime: 2}, {PulledTime: 3}, {PulledTime: 4}} + rand.Shuffle(len(data), func(i, j int) { + data[i], data[j] = data[j], data[i] + }) + + tests := []struct { + n float64 + expected int + minPullTime int64 + }{ + {n: 0, expected: 0, minPullTime: 0}, + {n: 1, expected: 1, minPullTime: 4}, + {n: 3, expected: 3, minPullTime: 2}, + {n: 5, expected: 5, minPullTime: 0}, + {n: 6, expected: 5, minPullTime: 0}, + } + + for _, tt := range tests { + 
e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) { + ev := New(map[string]rule.Parameter{ParameterN: tt.n}) + + result, err := ev.Process(data) + + require.NoError(t, err) + require.Len(t, result, tt.expected) + + for _, v := range result { + require.False(e.T(), v.PulledTime < tt.minPullTime) + } + }) + } +} + +func TestEvaluatorSuite(t *testing.T) { + suite.Run(t, &EvaluatorTestSuite{}) +} diff --git a/src/pkg/retention/policy/rule/latestps/evaluator.go b/src/pkg/retention/policy/rule/latestps/evaluator.go new file mode 100644 index 000000000..ac000a302 --- /dev/null +++ b/src/pkg/retention/policy/rule/latestps/evaluator.go @@ -0,0 +1,78 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package latestps + +import ( + "sort" + + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/pkg/retention/policy/action" + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // TemplateID of latest k rule + TemplateID = "latestPushedK" + // ParameterK ... 
+ ParameterK = TemplateID
+ // DefaultK defines the default K
+ DefaultK = 10
+)
+
+// evaluator for evaluating latest k tags
+type evaluator struct {
+ // latest k
+ k int
+}
+
+// Process the candidates based on the rule definition
+func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) {
+ // The updated proposal does not guarantee the order artifacts are provided, so we have to sort them first
+ sort.Slice(artifacts, func(i, j int) bool {
+ return artifacts[i].PushedTime > artifacts[j].PushedTime
+ })
+
+ i := e.k
+ if i > len(artifacts) {
+ i = len(artifacts)
+ }
+
+ return artifacts[:i], nil
+}
+
+// Specify what action is performed to the candidates processed by this evaluator
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New a Evaluator
+func New(params rule.Parameters) rule.Evaluator {
+ if params != nil {
+ if param, ok := params[ParameterK]; ok {
+ if v, ok := param.(float64); ok && v >= 0 {
+ return &evaluator{
+ k: int(v),
+ }
+ }
+ }
+ }
+
+ log.Warningf("default parameter %d used for rule %s", DefaultK, TemplateID)
+
+ return &evaluator{
+ k: DefaultK,
+ }
+}
diff --git a/src/pkg/retention/policy/rule/latestps/evaluator_test.go b/src/pkg/retention/policy/rule/latestps/evaluator_test.go
new file mode 100644
index 000000000..6e303c3c4
--- /dev/null
+++ b/src/pkg/retention/policy/rule/latestps/evaluator_test.go
@@ -0,0 +1,71 @@
+package latestps
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/require"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ tests := []struct {
+ Name string
+ args rule.Parameters
+ expectedK int
+ }{
+ {Name: "Valid", args: map[string]rule.Parameter{ParameterK: float64(5)}, expectedK: 5},
+ {Name: "Default If Negative", args: 
map[string]rule.Parameter{ParameterK: float64(-1)}, expectedK: DefaultK}, + {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultK}, + {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterK: "foo"}, expectedK: DefaultK}, + } + + for _, tt := range tests { + e.T().Run(tt.Name, func(t *testing.T) { + e := New(tt.args).(*evaluator) + + require.Equal(t, tt.expectedK, e.k) + }) + } +} + +func (e *EvaluatorTestSuite) TestProcess() { + data := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}, {PushedTime: 4}} + rand.Shuffle(len(data), func(i, j int) { + data[i], data[j] = data[j], data[i] + }) + + tests := []struct { + k float64 + expected int + }{ + {k: 0, expected: 0}, + {k: 1, expected: 1}, + {k: 3, expected: 3}, + {k: 5, expected: 5}, + {k: 6, expected: 5}, + } + + for _, tt := range tests { + e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) { + e := New(map[string]rule.Parameter{ParameterK: tt.k}) + + result, err := e.Process(data) + + require.NoError(t, err) + require.Len(t, result, tt.expected) + }) + } +} + +func TestEvaluator(t *testing.T) { + suite.Run(t, &EvaluatorTestSuite{}) +} diff --git a/src/pkg/retention/policy/rule/models.go b/src/pkg/retention/policy/rule/models.go new file mode 100644 index 000000000..448b10183 --- /dev/null +++ b/src/pkg/retention/policy/rule/models.go @@ -0,0 +1,64 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package rule + +// Metadata of the retention rule +type Metadata struct { + // UUID of rule + ID int `json:"id"` + + // Priority of rule when doing calculating + Priority int `json:"priority" valid:"Required"` + + // Disabled rule + Disabled bool `json:"disabled"` + + // Action of the rule performs + // "retain" + Action string `json:"action" valid:"Required"` + + // Template ID + Template string `json:"template" valid:"Required"` + + // The parameters of this rule + Parameters Parameters `json:"params"` + + // Selector attached to the rule for filtering tags + TagSelectors []*Selector `json:"tag_selectors" valid:"Required"` + + // Selector attached to the rule for filtering scope (e.g: repositories or namespaces) + ScopeSelectors map[string][]*Selector `json:"scope_selectors" valid:"Required"` +} + +// Selector to narrow down the list +type Selector struct { + // Kind of the selector + // "regularExpression" or "label" + Kind string `json:"kind" valid:"Required"` + + // Decorated the selector + // for "regularExpression" : "matches" and "excludes" + // for "label" : "with" and "without" + Decoration string `json:"decoration" valid:"Required"` + + // Param for the selector + Pattern string `json:"pattern" valid:"Required"` +} + +// Parameters of rule, indexed by the key +type Parameters map[string]Parameter + +// Parameter of rule +type Parameter interface{} diff --git a/src/pkg/retention/policy/rule/nothing/evaluator.go b/src/pkg/retention/policy/rule/nothing/evaluator.go new file mode 100644 index 000000000..8bc4b9063 --- /dev/null +++ b/src/pkg/retention/policy/rule/nothing/evaluator.go @@ -0,0 +1,42 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nothing
+
+import (
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of the "retain nothing" rule
+ TemplateID = "nothing"
+)
+
+type evaluator struct{}
+
+// Process for the "nothing" Evaluator returns an empty result set with no error, so no candidate is retained
+func (e *evaluator) Process(artifacts []*res.Candidate) (processed []*res.Candidate, err error) {
+ return processed, err
+}
+
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New returns a "nothing" Evaluator. It requires no parameters.
+func New(_ rule.Parameters) rule.Evaluator {
+ return &evaluator{}
+}
diff --git a/src/pkg/retention/policy/rule/nothing/evaluator_test.go b/src/pkg/retention/policy/rule/nothing/evaluator_test.go
new file mode 100644
index 000000000..1432db651
--- /dev/null
+++ b/src/pkg/retention/policy/rule/nothing/evaluator_test.go
@@ -0,0 +1,49 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package nothing + +import ( + "testing" + + "github.com/goharbor/harbor/src/pkg/retention/policy/rule" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type EvaluatorTestSuite struct { + suite.Suite +} + +func (e *EvaluatorTestSuite) TestNew() { + sut := New(rule.Parameters{}) + + require.NotNil(e.T(), sut) + require.IsType(e.T(), &evaluator{}, sut) +} + +func (e *EvaluatorTestSuite) TestProcess() { + sut := New(rule.Parameters{}) + input := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}} + + result, err := sut.Process(input) + + require.NoError(e.T(), err) + require.Len(e.T(), result, 0) +} + +func TestEvaluatorSuite(t *testing.T) { + suite.Run(t, &EvaluatorTestSuite{}) +} diff --git a/src/pkg/retention/q/query.go b/src/pkg/retention/q/query.go new file mode 100644 index 000000000..bb2ba75d5 --- /dev/null +++ b/src/pkg/retention/q/query.go @@ -0,0 +1,29 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package q + +// Query parameters +type Query struct { + PageNumber int64 + PageSize int64 +} + +// TaskQuery parameters +type TaskQuery struct { + ExecutionID int64 + Status string + PageNumber int64 + PageSize int64 +} diff --git a/src/pkg/retention/res/candidate.go b/src/pkg/retention/res/candidate.go new file mode 100644 index 000000000..ff8c8d145 --- /dev/null +++ b/src/pkg/retention/res/candidate.go @@ -0,0 +1,90 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package res
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ // Image kind
+ Image = "image"
+ // Chart kind
+ Chart = "chart"
+)
+
+// Repository of candidate
+type Repository struct {
+ // Namespace
+ Namespace string `json:"namespace"`
+ // Repository name
+ Name string `json:"name"`
+ // So far we need the kind of repository and retrieve candidates with different APIs
+ // TODO: REMOVE IT IN THE FUTURE IF WE SUPPORT UNIFIED ARTIFACT MODEL
+ Kind string `json:"kind"`
+}
+
+// ToJSON marshals repository to JSON string
+func (r *Repository) ToJSON() (string, error) {
+ jsonData, err := json.Marshal(r)
+ if err != nil {
+ return "", errors.Wrap(err, "marshal repository")
+ }
+
+ return string(jsonData), nil
+}
+
+// FromJSON constructs the repository from json data
+func (r *Repository) FromJSON(jsonData string) error {
+ if len(jsonData) == 0 {
+ return errors.New("empty json data to construct repository")
+ }
+
+ return json.Unmarshal([]byte(jsonData), r)
+}
+
+// Candidate for retention processor to match
+type Candidate struct {
+ // Namespace(project) ID
+ NamespaceID int64
+ // Namespace
+ Namespace string
+ // Repository name
+ Repository string
+ // Kind of the candidate
+ // "image" or "chart"
+ Kind string
+ // Tag info
+ Tag string
+ // Pushed time in seconds
+ PushedTime int64
+ // Pulled time in seconds
+ PulledTime int64
+ // Created time in seconds
+ CreationTime int64
+ // Labels attached with the candidate
+ Labels []string
+}
+
+// Hash code based on the candidate info for differentiation
+func (c *Candidate) Hash() string {
+ raw := fmt.Sprintf("%s:%s/%s:%s", c.Kind, c.Namespace, c.Repository, c.Tag)
+
+ return base64.StdEncoding.EncodeToString([]byte(raw))
+}
diff --git a/src/pkg/retention/res/result.go b/src/pkg/retention/res/result.go
new file mode 100644
index 000000000..be91be04a
--- /dev/null
+++ b/src/pkg/retention/res/result.go
@@ -0,0 +1,22 @@
+// Copyright Project Harbor Authors 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package res + +// Result keeps the action result +type Result struct { + Target *Candidate `json:"target"` + // nil error means success + Error error `json:"error"` +} diff --git a/src/pkg/retention/res/selector.go b/src/pkg/retention/res/selector.go new file mode 100644 index 000000000..de0d34836 --- /dev/null +++ b/src/pkg/retention/res/selector.go @@ -0,0 +1,30 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package res + +// Selector is used to filter the inputting list +type Selector interface { + // Select the matched ones + // + // Arguments: + // artifacts []*Candidate : candidates for matching + // + // Returns: + // []*Candidate : matched candidates + Select(artifacts []*Candidate) ([]*Candidate, error) +} + +// SelectorFactory is factory method to return a selector implementation +type SelectorFactory func(decoration string, pattern string) Selector diff --git a/src/pkg/retention/res/selectors/doublestar/selector.go b/src/pkg/retention/res/selectors/doublestar/selector.go new file mode 100644 index 000000000..fcbb628b9 --- /dev/null +++ b/src/pkg/retention/res/selectors/doublestar/selector.go @@ -0,0 +1,102 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doublestar + +import ( + "github.com/bmatcuk/doublestar" + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // Kind ... 
+ Kind = "doublestar" + // Matches [pattern] for tag (default) + Matches = "matches" + // Excludes [pattern] for tag (default) + Excludes = "excludes" + // RepoMatches represents repository matches [pattern] + RepoMatches = "repoMatches" + // RepoExcludes represents repository excludes [pattern] + RepoExcludes = "repoExcludes" + // NSMatches represents namespace matches [pattern] + NSMatches = "nsMatches" + // NSExcludes represents namespace excludes [pattern] + NSExcludes = "nsExcludes" +) + +// selector for regular expression +type selector struct { + // Pre defined pattern declarator + // "matches", "excludes", "repoMatches" or "repoExcludes" + decoration string + // The pattern expression + pattern string +} + +// Select candidates by regular expressions +func (s *selector) Select(artifacts []*res.Candidate) (selected []*res.Candidate, err error) { + value := "" + excludes := false + + for _, art := range artifacts { + switch s.decoration { + case Matches: + value = art.Tag + case Excludes: + value = art.Tag + excludes = true + case RepoMatches: + value = art.Repository + case RepoExcludes: + value = art.Repository + excludes = true + case NSMatches: + value = art.Namespace + case NSExcludes: + value = art.Namespace + excludes = true + } + + if len(value) > 0 { + matched, err := match(s.pattern, value) + if err != nil { + // if error occurred, directly throw it out + return nil, err + } + + if (matched && !excludes) || (!matched && excludes) { + selected = append(selected, art) + } + } + } + + return selected, nil +} + +// New is factory method for doublestar selector +func New(decoration string, pattern string) res.Selector { + return &selector{ + decoration: decoration, + pattern: pattern, + } +} + +// match returns whether the str matches the pattern +func match(pattern, str string) (bool, error) { + if len(pattern) == 0 { + return true, nil + } + return doublestar.Match(pattern, str) +} diff --git 
a/src/pkg/retention/res/selectors/doublestar/selector_test.go b/src/pkg/retention/res/selectors/doublestar/selector_test.go new file mode 100644 index 000000000..23c8dd377 --- /dev/null +++ b/src/pkg/retention/res/selectors/doublestar/selector_test.go @@ -0,0 +1,252 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doublestar + +import ( + "fmt" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +// RegExpSelectorTestSuite is a suite for testing the label selector +type RegExpSelectorTestSuite struct { + suite.Suite + + artifacts []*res.Candidate +} + +// TestRegExpSelector is entrance for RegExpSelectorTestSuite +func TestRegExpSelector(t *testing.T) { + suite.Run(t, new(RegExpSelectorTestSuite)) +} + +// SetupSuite to do preparation work +func (suite *RegExpSelectorTestSuite) SetupSuite() { + suite.artifacts = []*res.Candidate{ + { + NamespaceID: 1, + Namespace: "library", + Repository: "harbor", + Tag: "latest", + Kind: res.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1", "label2", "label3"}, + }, + { + NamespaceID: 2, + Namespace: "retention", + Repository: "redis", + Tag: "4.0", + Kind: res.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: 
time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1", "label4", "label5"}, + }, + { + NamespaceID: 2, + Namespace: "retention", + Repository: "redis", + Tag: "4.1", + Kind: res.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1", "label4", "label5"}, + }, + } +} + +// TestTagMatches tests the tag `matches` case +func (suite *RegExpSelectorTestSuite) TestTagMatches() { + tagMatches := &selector{ + decoration: Matches, + pattern: "{latest,4.*}", + } + + selected, err := tagMatches.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 3, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:latest", "redis:4.0", "redis:4.1"}, selected) + }) + + tagMatches2 := &selector{ + decoration: Matches, + pattern: "4.*", + } + + selected, err = tagMatches2.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 2, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"redis:4.0", "redis:4.1"}, selected) + }) +} + +// TestTagExcludes tests the tag `excludes` case +func (suite *RegExpSelectorTestSuite) TestTagExcludes() { + tagExcludes := &selector{ + decoration: Excludes, + pattern: "{latest,4.*}", + } + + selected, err := tagExcludes.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 0, len(selected)) + + tagExcludes2 := &selector{ + decoration: Excludes, + pattern: "4.*", + } + + selected, err = tagExcludes2.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:latest"}, selected) + }) +} + +// TestRepoMatches tests the repository `matches` case +func (suite *RegExpSelectorTestSuite) TestRepoMatches() { + repoMatches := &selector{ + decoration: RepoMatches, + pattern: 
"{redis}", + } + + selected, err := repoMatches.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 2, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"redis:4.0", "redis:4.1"}, selected) + }) + + repoMatches2 := &selector{ + decoration: RepoMatches, + pattern: "har*", + } + + selected, err = repoMatches2.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:latest"}, selected) + }) +} + +// TestRepoExcludes tests the repository `excludes` case +func (suite *RegExpSelectorTestSuite) TestRepoExcludes() { + repoExcludes := &selector{ + decoration: RepoExcludes, + pattern: "{redis}", + } + + selected, err := repoExcludes.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:latest"}, selected) + }) + + repoExcludes2 := &selector{ + decoration: RepoExcludes, + pattern: "har*", + } + + selected, err = repoExcludes2.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 2, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"redis:4.0", "redis:4.1"}, selected) + }) +} + +// TestNSMatches tests the namespace `matches` case +func (suite *RegExpSelectorTestSuite) TestNSMatches() { + repoMatches := &selector{ + decoration: NSMatches, + pattern: "{library}", + } + + selected, err := repoMatches.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:latest"}, selected) + }) + + repoMatches2 := &selector{ + decoration: RepoMatches, + pattern: "re*", + } + + selected, err = repoMatches2.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 2, 
len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"redis:4.0", "redis:4.1"}, selected) + }) +} + +// TestNSExcludes tests the namespace `excludes` case +func (suite *RegExpSelectorTestSuite) TestNSExcludes() { + repoExcludes := &selector{ + decoration: NSExcludes, + pattern: "{library}", + } + + selected, err := repoExcludes.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 2, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"redis:4.0", "redis:4.1"}, selected) + }) + + repoExcludes2 := &selector{ + decoration: NSExcludes, + pattern: "re*", + } + + selected, err = repoExcludes2.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:latest"}, selected) + }) +} + +// Check whether the returned result matched the expected ones (only check repo:tag) +func expect(expected []string, candidates []*res.Candidate) bool { + hash := make(map[string]bool) + + for _, art := range candidates { + hash[fmt.Sprintf("%s:%s", art.Repository, art.Tag)] = true + } + + for _, exp := range expected { + if _, ok := hash[exp]; !ok { + return ok + } + } + + return true +} diff --git a/src/pkg/retention/res/selectors/index/index.go b/src/pkg/retention/res/selectors/index/index.go new file mode 100644 index 000000000..fe00c4f4b --- /dev/null +++ b/src/pkg/retention/res/selectors/index/index.go @@ -0,0 +1,109 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "sync" + + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label" + + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar" + "github.com/pkg/errors" +) + +func init() { + // Register doublestar selector + Register(doublestar.Kind, []string{ + doublestar.Matches, + doublestar.Excludes, + doublestar.RepoMatches, + doublestar.RepoExcludes, + doublestar.NSMatches, + doublestar.NSExcludes, + }, doublestar.New) + + // Register label selector + Register(label.Kind, []string{label.With, label.Without}, label.New) +} + +// index for keeping the mapping between selector meta and its implementation +var index sync.Map + +// IndexedMeta describes the indexed selector +type IndexedMeta struct { + Kind string `json:"kind"` + Decorations []string `json:"decorations"` +} + +// indexedItem defined item kept in the index +type indexedItem struct { + Meta *IndexedMeta + Factory res.SelectorFactory +} + +// Register the selector with the corresponding selector kind and decoration +func Register(kind string, decorations []string, factory res.SelectorFactory) { + if len(kind) == 0 || factory == nil { + // do nothing + return + } + + index.Store(kind, &indexedItem{ + Meta: &IndexedMeta{ + Kind: kind, + Decorations: decorations, + }, + Factory: factory, + }) +} + +// Get selector with the provided kind and decoration +func Get(kind, decoration, pattern string) (res.Selector, error) { + if len(kind) == 0 || len(decoration) == 0 { + return nil, 
errors.New("empty selector kind or decoration") + } + + v, ok := index.Load(kind) + if !ok { + return nil, errors.Errorf("selector %s is not registered", kind) + } + + item := v.(*indexedItem) + for _, dec := range item.Meta.Decorations { + if dec == decoration { + factory := item.Factory + return factory(decoration, pattern), nil + } + } + + return nil, errors.Errorf("decoration %s of selector %s is not supported", decoration, kind) +} + +// Index returns all the declarative selectors +func Index() []*IndexedMeta { + all := make([]*IndexedMeta, 0) + + index.Range(func(k, v interface{}) bool { + if item, ok := v.(*indexedItem); ok { + all = append(all, item.Meta) + return true + } + + return false + }) + + return all +} diff --git a/src/pkg/retention/res/selectors/label/selector.go b/src/pkg/retention/res/selectors/label/selector.go new file mode 100644 index 000000000..2fa788a5a --- /dev/null +++ b/src/pkg/retention/res/selectors/label/selector.go @@ -0,0 +1,86 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package label + +import ( + "strings" + + "github.com/goharbor/harbor/src/pkg/retention/res" +) + +const ( + // Kind ... 
+ Kind = "label" + // With labels + With = "withLabels" + // Without labels + Without = "withoutLabels" +) + +// selector is for label selector +type selector struct { + // Pre defined pattern decorations + // "with" or "without" + decoration string + // Label list + labels []string +} + +// Select candidates by the labels +func (s *selector) Select(artifacts []*res.Candidate) (selected []*res.Candidate, err error) { + for _, art := range artifacts { + if isMatched(s.labels, art.Labels, s.decoration) { + selected = append(selected, art) + } + } + + return selected, nil +} + +// New is factory method for list selector +func New(decoration string, pattern string) res.Selector { + labels := make([]string, 0) + if len(pattern) > 0 { + labels = append(labels, strings.Split(pattern, ",")...) + } + + return &selector{ + decoration: decoration, + labels: labels, + } +} + +// Check if the resource labels match the pattern labels +func isMatched(patternLbls []string, resLbls []string, decoration string) bool { + hash := make(map[string]bool) + + for _, lbl := range resLbls { + hash[lbl] = true + } + + for _, lbl := range patternLbls { + _, exists := hash[lbl] + + if decoration == Without && exists { + return false + } + + if decoration == With && !exists { + return false + } + } + + return true +} diff --git a/src/pkg/retention/res/selectors/label/selector_test.go b/src/pkg/retention/res/selectors/label/selector_test.go new file mode 100644 index 000000000..6bf58118a --- /dev/null +++ b/src/pkg/retention/res/selectors/label/selector_test.go @@ -0,0 +1,148 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package label + +import ( + "fmt" + "github.com/goharbor/harbor/src/pkg/retention/res" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +// LabelSelectorTestSuite is a suite for testing the label selector +type LabelSelectorTestSuite struct { + suite.Suite + + artifacts []*res.Candidate +} + +// TestLabelSelector is entrance for LabelSelectorTestSuite +func TestLabelSelector(t *testing.T) { + suite.Run(t, new(LabelSelectorTestSuite)) +} + +// SetupSuite to do preparation work +func (suite *LabelSelectorTestSuite) SetupSuite() { + suite.artifacts = []*res.Candidate{ + { + NamespaceID: 1, + Namespace: "library", + Repository: "harbor", + Tag: "1.9", + Kind: res.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1", "label2", "label3"}, + }, + { + NamespaceID: 1, + Namespace: "library", + Repository: "harbor", + Tag: "dev", + Kind: res.Image, + PushedTime: time.Now().Unix() - 3600, + PulledTime: time.Now().Unix(), + CreationTime: time.Now().Unix() - 7200, + Labels: []string{"label1", "label4", "label5"}, + }, + } +} + +// TestWithLabelsUnMatched tests the selector of `with` labels but nothing matched +func (suite *LabelSelectorTestSuite) TestWithLabelsUnMatched() { + withNothing := &selector{ + decoration: With, + labels: []string{"label6"}, + } + + selected, err := withNothing.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 
0, len(selected)) +} + +// TestWithLabelsMatched tests the selector of `with` labels and matched something +func (suite *LabelSelectorTestSuite) TestWithLabelsMatched() { + with1 := &selector{ + decoration: With, + labels: []string{"label2"}, + } + + selected, err := with1.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 1, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:1.9"}, selected) + }) + + with2 := &selector{ + decoration: With, + labels: []string{"label1"}, + } + + selected2, err := with2.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 2, len(selected2)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:1.9", "harbor:dev"}, selected2) + }) +} + +// TestWithoutExistingLabels tests the selector of `without` existing labels +func (suite *LabelSelectorTestSuite) TestWithoutExistingLabels() { + withoutExisting := &selector{ + decoration: Without, + labels: []string{"label1"}, + } + + selected, err := withoutExisting.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 0, len(selected)) +} + +// TestWithoutNoneExistingLabels tests the selector of `without` non-existing labels +func (suite *LabelSelectorTestSuite) TestWithoutNoneExistingLabels() { + withoutNonExisting := &selector{ + decoration: Without, + labels: []string{"label6"}, + } + + selected, err := withoutNonExisting.Select(suite.artifacts) + require.NoError(suite.T(), err) + assert.Equal(suite.T(), 2, len(selected)) + assert.Condition(suite.T(), func() bool { + return expect([]string{"harbor:1.9", "harbor:dev"}, selected) + }) +} + +// Check whether the returned result matched the expected ones (only check repo:tag) +func expect(expected []string, candidates []*res.Candidate) bool { + hash := make(map[string]bool) + + for _, art := range candidates { + hash[fmt.Sprintf("%s:%s", art.Repository, art.Tag)] = true + } + + for _, exp := 
range expected { + if _, ok := hash[exp]; !ok { + return ok + } + } + + return true +} diff --git a/src/pkg/scan/vuln.go b/src/pkg/scan/vuln.go new file mode 100644 index 000000000..a4ccac027 --- /dev/null +++ b/src/pkg/scan/vuln.go @@ -0,0 +1,136 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scan + +import ( + "fmt" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/clair" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "reflect" +) + +// VulnerabilityItem represents a vulnerability reported by scanner +type VulnerabilityItem struct { + ID string `json:"id"` + Severity models.Severity `json:"severity"` + Pkg string `json:"package"` + Version string `json:"version"` + Description string `json:"description"` + Link string `json:"link"` + Fixed string `json:"fixedVersion,omitempty"` +} + +// VulnerabilityList is a list of vulnerabilities, which should be scanner-agnostic +type VulnerabilityList []VulnerabilityItem + +// ApplyWhitelist filters out the CVE defined in the whitelist in the parm. +// It returns the items that are filtered for the caller to track or log. 
+func (vl *VulnerabilityList) ApplyWhitelist(whitelist models.CVEWhitelist) VulnerabilityList { + filtered := VulnerabilityList{} + if whitelist.IsExpired() { + log.Info("The input whitelist is expired, skip filtering") + return filtered + } + s := whitelist.CVESet() + r := (*vl)[:0] + for _, v := range *vl { + if _, ok := s[v.ID]; ok { + log.Debugf("Filtered Vulnerability in whitelist, CVE ID: %s, severity: %s", v.ID, v.Severity) + filtered = append(filtered, v) + } else { + r = append(r, v) + } + } + val := reflect.ValueOf(vl) + val.Elem().SetLen(len(r)) + return filtered +} + +// Severity returns the highest severity of the vulnerabilities in the list +func (vl *VulnerabilityList) Severity() models.Severity { + s := models.SevNone + for _, v := range *vl { + if v.Severity > s { + s = v.Severity + } + } + return s +} + +// HasCVE returns whether the vulnerability list has the vulnerability with CVE ID in the parm +func (vl *VulnerabilityList) HasCVE(id string) bool { + for _, v := range *vl { + if v.ID == id { + return true + } + } + return false +} + +// VulnListFromClairResult transforms the returned value of Clair API to a VulnerabilityList +func VulnListFromClairResult(layerWithVuln *models.ClairLayerEnvelope) VulnerabilityList { + res := VulnerabilityList{} + if layerWithVuln == nil { + return res + } + l := layerWithVuln.Layer + if l == nil { + return res + } + features := l.Features + if features == nil { + return res + } + for _, f := range features { + vulnerabilities := f.Vulnerabilities + if vulnerabilities == nil { + continue + } + for _, v := range vulnerabilities { + vItem := VulnerabilityItem{ + ID: v.Name, + Pkg: f.Name, + Version: f.Version, + Severity: clair.ParseClairSev(v.Severity), + Fixed: v.FixedBy, + Link: v.Link, + Description: v.Description, + } + res = append(res, vItem) + } + } + return res +} + +// VulnListByDigest returns the VulnerabilityList based on the scan result of artifact with the digest in the parm +func 
VulnListByDigest(digest string) (VulnerabilityList, error) { + var res VulnerabilityList + overview, err := dao.GetImgScanOverview(digest) + if err != nil { + return res, err + } + if overview == nil || len(overview.DetailsKey) == 0 { + return res, fmt.Errorf("unable to get the scan result for digest: %s, the artifact is not scanned", digest) + } + c := clair.NewClient(config.ClairEndpoint(), nil) + clairRes, err := c.GetResult(overview.DetailsKey) + if err != nil { + return res, fmt.Errorf("failed to get scan result from Clair, error: %v", err) + } + return VulnListFromClairResult(clairRes), nil +} diff --git a/src/pkg/scan/vuln_test.go b/src/pkg/scan/vuln_test.go new file mode 100644 index 000000000..fce594cd9 --- /dev/null +++ b/src/pkg/scan/vuln_test.go @@ -0,0 +1,178 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scan + +import ( + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +var ( + past = int64(1561967574) + vulnList1 = VulnerabilityList{} + vulnList2 = VulnerabilityList{ + {ID: "CVE-2018-10754", + Severity: models.SevLow, + Pkg: "ncurses", + Version: "6.0+20161126-1+deb9u2", + }, + { + ID: "CVE-2018-6485", + Severity: models.SevHigh, + Pkg: "glibc", + Version: "2.24-11+deb9u4", + }, + } + whiteList1 = models.CVEWhitelist{ + ExpiresAt: &past, + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2018-6485"}, + }, + } + whiteList2 = models.CVEWhitelist{ + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2018-6485"}, + }, + } + whiteList3 = models.CVEWhitelist{ + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2018-6485"}, + {CVEID: "CVE-2018-10754"}, + {CVEID: "CVE-2019-12817"}, + }, + } +) + +func TestMain(m *testing.M) { + dao.PrepareTestForPostgresSQL() + os.Exit(m.Run()) +} + +func TestVulnerabilityList_HasCVE(t *testing.T) { + cases := []struct { + input VulnerabilityList + cve string + result bool + }{ + { + input: vulnList1, + cve: "CVE-2018-10754", + result: false, + }, + { + input: vulnList2, + cve: "CVE-2018-10754", + result: true, + }, + } + for _, c := range cases { + assert.Equal(t, c.result, c.input.HasCVE(c.cve)) + } +} + +func TestVulnerabilityList_Severity(t *testing.T) { + cases := []struct { + input VulnerabilityList + expect models.Severity + }{ + { + input: vulnList1, + expect: models.SevNone, + }, { + input: vulnList2, + expect: models.SevHigh, + }, + } + for _, c := range cases { + assert.Equal(t, c.expect, c.input.Severity()) + } +} + +func TestVulnerabilityList_ApplyWhitelist(t *testing.T) { + cases := []struct { + vl VulnerabilityList + wl models.CVEWhitelist + expectFiltered VulnerabilityList + expectSev models.Severity + }{ + { + vl: vulnList2, + wl: whiteList1, + expectFiltered: VulnerabilityList{}, + expectSev: 
models.SevHigh, + }, + { + vl: vulnList2, + wl: whiteList2, + expectFiltered: VulnerabilityList{ + { + ID: "CVE-2018-6485", + Severity: models.SevHigh, + Pkg: "glibc", + Version: "2.24-11+deb9u4", + }, + }, + expectSev: models.SevLow, + }, + { + vl: vulnList1, + wl: whiteList3, + expectFiltered: VulnerabilityList{}, + expectSev: models.SevNone, + }, + { + vl: vulnList2, + wl: whiteList3, + expectFiltered: VulnerabilityList{ + {ID: "CVE-2018-10754", + Severity: models.SevLow, + Pkg: "ncurses", + Version: "6.0+20161126-1+deb9u2", + }, + { + ID: "CVE-2018-6485", + Severity: models.SevHigh, + Pkg: "glibc", + Version: "2.24-11+deb9u4", + }, + }, + expectSev: models.SevNone, + }, + } + for _, c := range cases { + filtered := c.vl.ApplyWhitelist(c.wl) + assert.Equal(t, c.expectFiltered, filtered) + assert.Equal(t, c.vl.Severity(), c.expectSev) + } +} + +func TestVulnListByDigest(t *testing.T) { + _, err := VulnListByDigest("notexist") + assert.NotNil(t, err) +} + +func TestVulnListFromClairResult(t *testing.T) { + l := VulnListFromClairResult(nil) + assert.Equal(t, VulnerabilityList{}, l) + lv := &models.ClairLayerEnvelope{ + Layer: nil, + Error: nil, + } + l2 := VulnListFromClairResult(lv) + assert.Equal(t, VulnerabilityList{}, l2) +} diff --git a/src/pkg/scan/whitelist/manager.go b/src/pkg/scan/whitelist/manager.go new file mode 100644 index 000000000..d582e3f10 --- /dev/null +++ b/src/pkg/scan/whitelist/manager.go @@ -0,0 +1,85 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package whitelist + +import ( + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/jobservice/logger" +) + +// Manager defines the interface of CVE whitelist manager, it support both system level and project level whitelists +type Manager interface { + // CreateEmpty creates empty whitelist for given project + CreateEmpty(projectID int64) error + // Set sets the whitelist for given project (create or update) + Set(projectID int64, list models.CVEWhitelist) error + // Get gets the whitelist for given project + Get(projectID int64) (*models.CVEWhitelist, error) + // SetSys sets system level whitelist + SetSys(list models.CVEWhitelist) error + // GetSys gets system level whitelist + GetSys() (*models.CVEWhitelist, error) +} + +type defaultManager struct{} + +// CreateEmpty creates empty whitelist for given project +func (d *defaultManager) CreateEmpty(projectID int64) error { + l := models.CVEWhitelist{ + ProjectID: projectID, + } + _, err := dao.CreateCVEWhitelist(l) + if err != nil { + logger.Errorf("Failed to create empty CVE whitelist for project: %d, error: %v", projectID, err) + } + return err +} + +// Set sets the whitelist for given project (create or update) +func (d *defaultManager) Set(projectID int64, list models.CVEWhitelist) error { + list.ProjectID = projectID + if err := Validate(list); err != nil { + return err + } + _, err := dao.UpdateCVEWhitelist(list) + return err +} + +// Get gets the whitelist for given project +func (d *defaultManager) Get(projectID int64) (*models.CVEWhitelist, error) { + wl, err := dao.GetCVEWhitelist(projectID) + if wl == nil && err == nil { + log.Debugf("No CVE whitelist found for project %d, returning empty list.", projectID) + return &models.CVEWhitelist{ProjectID: projectID, Items: 
[]models.CVEWhitelistItem{}}, nil + } + return wl, err +} + +// SetSys sets the system level whitelist +func (d *defaultManager) SetSys(list models.CVEWhitelist) error { + return d.Set(0, list) +} + +// GetSys gets the system level whitelist +func (d *defaultManager) GetSys() (*models.CVEWhitelist, error) { + return d.Get(0) +} + +// NewDefaultManager return a new instance of defaultManager +func NewDefaultManager() Manager { + return &defaultManager{} +} diff --git a/src/pkg/scan/whitelist/manager_test.go b/src/pkg/scan/whitelist/manager_test.go new file mode 100644 index 000000000..8dbf6da37 --- /dev/null +++ b/src/pkg/scan/whitelist/manager_test.go @@ -0,0 +1,46 @@ +package whitelist + +import ( + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestMain(m *testing.M) { + + // databases := []string{"mysql", "sqlite"} + databases := []string{"postgresql"} + for _, database := range databases { + log.Infof("run test cases for database: %s", database) + + result := 1 + switch database { + case "postgresql": + dao.PrepareTestForPostgresSQL() + default: + log.Fatalf("invalid database: %s", database) + } + + result = m.Run() + + if result != 0 { + os.Exit(result) + } + } +} + +func TestDefaultManager_CreateEmpty(t *testing.T) { + dm := NewDefaultManager() + assert.NoError(t, dm.CreateEmpty(99)) + assert.Error(t, dm.CreateEmpty(99)) +} + +func TestDefaultManager_Get(t *testing.T) { + dm := NewDefaultManager() + // return empty list + l, err := dm.Get(1234) + assert.Nil(t, err) + assert.Empty(t, l.Items) +} diff --git a/src/pkg/scan/whitelist/validator.go b/src/pkg/scan/whitelist/validator.go new file mode 100644 index 000000000..cef2a17df --- /dev/null +++ b/src/pkg/scan/whitelist/validator.go @@ -0,0 +1,60 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package whitelist + +import ( + "fmt" + "github.com/goharbor/harbor/src/common/models" + "regexp" +) + +type invalidErr struct { + msg string +} + +func (ie *invalidErr) Error() string { + return ie.msg +} + +// NewInvalidErr ... +func NewInvalidErr(s string) error { + return &invalidErr{ + msg: s, + } +} + +// IsInvalidErr checks if the error is an invalidErr +func IsInvalidErr(err error) bool { + _, ok := err.(*invalidErr) + return ok +} + +const cveIDPattern = `^CVE-\d{4}-\d+$` + +// Validate help validates the CVE whitelist, to ensure the CVE ID is valid and there's no duplication +func Validate(wl models.CVEWhitelist) error { + m := map[string]struct{}{} + re := regexp.MustCompile(cveIDPattern) + for _, it := range wl.Items { + if !re.MatchString(it.CVEID) { + return &invalidErr{fmt.Sprintf("invalid CVE ID: %s", it.CVEID)} + } + if _, ok := m[it.CVEID]; ok { + return &invalidErr{fmt.Sprintf("duplicate CVE ID in whitelist: %s", it.CVEID)} + } + m[it.CVEID] = struct{}{} + } + return nil +} diff --git a/src/pkg/scan/whitelist/validator_test.go b/src/pkg/scan/whitelist/validator_test.go new file mode 100644 index 000000000..e147d2364 --- /dev/null +++ b/src/pkg/scan/whitelist/validator_test.go @@ -0,0 +1,102 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package whitelist + +import ( + "fmt" + "github.com/goharbor/harbor/src/common/models" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestIsInvalidErr(t *testing.T) { + cases := []struct { + instance error + expect bool + }{ + { + instance: nil, + expect: false, + }, + { + instance: fmt.Errorf("whatever"), + expect: false, + }, + { + instance: NewInvalidErr("This is true"), + expect: true, + }, + } + + for n, c := range cases { + t.Logf("Executing TestIsInvalidErr case: %d\n", n) + assert.Equal(t, c.expect, IsInvalidErr(c.instance)) + } +} + +func TestValidate(t *testing.T) { + cases := []struct { + l models.CVEWhitelist + noError bool + }{ + { + l: models.CVEWhitelist{ + Items: nil, + }, + noError: true, + }, + { + l: models.CVEWhitelist{ + Items: []models.CVEWhitelistItem{}, + }, + noError: true, + }, + { + l: models.CVEWhitelist{ + Items: []models.CVEWhitelistItem{ + {CVEID: "breakit"}, + }, + }, + noError: false, + }, + { + l: models.CVEWhitelist{ + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2014-456132"}, + {CVEID: "CVE-2014-7654321"}, + }, + }, + noError: true, + }, + { + l: models.CVEWhitelist{ + Items: []models.CVEWhitelistItem{ + {CVEID: "CVE-2014-456132"}, + {CVEID: "CVE-2014-456132"}, + {CVEID: "CVE-2014-7654321"}, + }, + }, + noError: false, + }, + } + for n, c := range cases { + t.Logf("Executing TestValidate case: %d\n", n) + e := Validate(c.l) + assert.Equal(t, c.noError, e == nil) + if e != nil { + assert.True(t, IsInvalidErr(e)) + } + } +} diff --git a/src/pkg/scheduler/dao/schedule.go 
b/src/pkg/scheduler/dao/schedule.go new file mode 100644 index 000000000..1728556e4 --- /dev/null +++ b/src/pkg/scheduler/dao/schedule.go @@ -0,0 +1,99 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "errors" + "fmt" + "time" + + "github.com/astaxie/beego/orm" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/scheduler/model" +) + +// ScheduleDao defines the method that a schedule data access model should implement +type ScheduleDao interface { + Create(*model.Schedule) (int64, error) + Update(*model.Schedule, ...string) error + Delete(int64) error + Get(int64) (*model.Schedule, error) + List(...*model.ScheduleQuery) ([]*model.Schedule, error) +} + +// New returns an instance of the default schedule data access model implementation +func New() ScheduleDao { + return &scheduleDao{} +} + +type scheduleDao struct{} + +func (s *scheduleDao) Create(schedule *model.Schedule) (int64, error) { + if schedule == nil { + return 0, errors.New("nil schedule") + } + now := time.Now() + schedule.CreationTime = &now + schedule.UpdateTime = &now + return dao.GetOrmer().Insert(schedule) +} + +func (s *scheduleDao) Update(schedule *model.Schedule, cols ...string) error { + if schedule == nil { + return errors.New("nil schedule") + } + if schedule.ID <= 0 { + return fmt.Errorf("invalid ID: %d", schedule.ID) + } + now := time.Now() + schedule.UpdateTime = &now + _, err := 
dao.GetOrmer().Update(schedule, cols...) + return err +} + +func (s *scheduleDao) Delete(id int64) error { + _, err := dao.GetOrmer().Delete(&model.Schedule{ + ID: id, + }) + return err +} + +func (s *scheduleDao) Get(id int64) (*model.Schedule, error) { + schedule := &model.Schedule{ + ID: id, + } + if err := dao.GetOrmer().Read(schedule); err != nil { + if err == orm.ErrNoRows { + return nil, nil + } + return nil, err + } + return schedule, nil +} + +func (s *scheduleDao) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) { + qs := dao.GetOrmer().QueryTable(&model.Schedule{}) + if len(query) > 0 && query[0] != nil { + if len(query[0].JobID) > 0 { + qs = qs.Filter("JobID", query[0].JobID) + } + } + schedules := []*model.Schedule{} + _, err := qs.All(&schedules) + if err != nil { + return nil, err + } + return schedules, nil +} diff --git a/src/pkg/scheduler/dao/schedule_test.go b/src/pkg/scheduler/dao/schedule_test.go new file mode 100644 index 000000000..60acf4f23 --- /dev/null +++ b/src/pkg/scheduler/dao/schedule_test.go @@ -0,0 +1,122 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/pkg/scheduler/model" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +var schDao = &scheduleDao{} + +type scheduleTestSuite struct { + suite.Suite + scheduleID int64 +} + +func (s *scheduleTestSuite) SetupSuite() { + dao.PrepareTestForPostgresSQL() +} + +func (s *scheduleTestSuite) SetupTest() { + t := s.T() + id, err := schDao.Create(&model.Schedule{ + JobID: "1", + Status: "pending", + }) + require.Nil(t, err) + s.scheduleID = id +} +func (s *scheduleTestSuite) TearDownTest() { + // clear + dao.GetOrmer().Raw("delete from schedule").Exec() +} + +func (s *scheduleTestSuite) TestCreate() { + t := s.T() + // nil schedule + _, err := schDao.Create(nil) + require.NotNil(t, err) + + // pass + _, err = schDao.Create(&model.Schedule{ + JobID: "1", + }) + require.Nil(t, err) +} + +func (s *scheduleTestSuite) TestUpdate() { + t := s.T() + // nil schedule + err := schDao.Update(nil) + require.NotNil(t, err) + + // invalid ID + err = schDao.Update(&model.Schedule{}) + require.NotNil(t, err) + + // pass + err = schDao.Update(&model.Schedule{ + ID: s.scheduleID, + Status: "running", + }) + require.Nil(t, err) + schedule, err := schDao.Get(s.scheduleID) + require.Nil(t, err) + assert.Equal(t, "running", schedule.Status) +} + +func (s *scheduleTestSuite) TestDelete() { + t := s.T() + err := schDao.Delete(s.scheduleID) + require.Nil(t, err) + schedule, err := schDao.Get(s.scheduleID) + require.Nil(t, err) + assert.Nil(t, schedule) +} + +func (s *scheduleTestSuite) TestGet() { + t := s.T() + schedule, err := schDao.Get(s.scheduleID) + require.Nil(t, err) + assert.Equal(t, "pending", schedule.Status) +} + +func (s *scheduleTestSuite) TestList() { + t := s.T() + // nil query + schedules, err := schDao.List() + require.Nil(t, err) + require.Equal(t, 1, len(schedules)) + assert.Equal(t, 
s.scheduleID, schedules[0].ID) + + // query by job ID + schedules, err = schDao.List(&model.ScheduleQuery{ + JobID: "1", + }) + require.Nil(t, err) + require.Equal(t, 1, len(schedules)) + assert.Equal(t, s.scheduleID, schedules[0].ID) +} + +func TestScheduleDao(t *testing.T) { + suite.Run(t, &scheduleTestSuite{}) +} diff --git a/src/pkg/scheduler/hook/handler.go b/src/pkg/scheduler/hook/handler.go new file mode 100644 index 000000000..f176850fa --- /dev/null +++ b/src/pkg/scheduler/hook/handler.go @@ -0,0 +1,59 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hook + +import ( + "time" + + "github.com/goharbor/harbor/src/pkg/scheduler" + "github.com/goharbor/harbor/src/pkg/scheduler/model" +) + +// GlobalController is an instance of the default controller that can be used globally +var GlobalController = NewController() + +// Controller updates the scheduler job status or runs the callback function +type Controller interface { + UpdateStatus(scheduleID int64, status string) error + Run(callbackFuncName string, params interface{}) error +} + +// NewController returns an instance of the default controller +func NewController() Controller { + return &controller{ + manager: scheduler.GlobalManager, + } +} + +type controller struct { + manager scheduler.Manager +} + +func (c *controller) UpdateStatus(scheduleID int64, status string) error { + now := time.Now() + return c.manager.Update(&model.Schedule{ + ID: scheduleID, + Status: status, + UpdateTime: &now, + }, "Status", "UpdateTime") +} + +func (c *controller) Run(callbackFuncName string, params interface{}) error { + f, err := scheduler.GetCallbackFunc(callbackFuncName) + if err != nil { + return err + } + return f(params) +} diff --git a/src/pkg/scheduler/hook/handler_test.go b/src/pkg/scheduler/hook/handler_test.go new file mode 100644 index 000000000..99875ae5f --- /dev/null +++ b/src/pkg/scheduler/hook/handler_test.go @@ -0,0 +1,56 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hook + +import ( + "testing" + + "github.com/goharbor/harbor/src/pkg/scheduler" + "github.com/goharbor/harbor/src/pkg/scheduler/model" + htesting "github.com/goharbor/harbor/src/testing" + "github.com/stretchr/testify/require" +) + +var h = &controller{ + manager: &htesting.FakeSchedulerManager{}, +} + +func TestUpdateStatus(t *testing.T) { + // task not exist + err := h.UpdateStatus(1, "running") + require.NotNil(t, err) + + // pass + h.manager.(*htesting.FakeSchedulerManager).Schedules = []*model.Schedule{ + { + ID: 1, + Status: "", + }, + } + err = h.UpdateStatus(1, "running") + require.Nil(t, err) +} + +func TestRun(t *testing.T) { + // callback function not exist + err := h.Run("not-exist", nil) + require.NotNil(t, err) + + // pass + err = scheduler.Register("callback", func(interface{}) error { return nil }) + require.Nil(t, err) + err = h.Run("callback", nil) + require.Nil(t, err) +} diff --git a/src/pkg/scheduler/manager.go b/src/pkg/scheduler/manager.go new file mode 100644 index 000000000..735e89873 --- /dev/null +++ b/src/pkg/scheduler/manager.go @@ -0,0 +1,66 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scheduler + +import ( + "github.com/goharbor/harbor/src/pkg/scheduler/dao" + "github.com/goharbor/harbor/src/pkg/scheduler/model" +) + +var ( + // GlobalManager is an instance of the default manager that + // can be used globally + GlobalManager = NewManager() +) + +// Manager manages the schedule of the scheduler +type Manager interface { + Create(*model.Schedule) (int64, error) + Update(*model.Schedule, ...string) error + Delete(int64) error + Get(int64) (*model.Schedule, error) + List(...*model.ScheduleQuery) ([]*model.Schedule, error) +} + +// NewManager returns an instance of the default manager +func NewManager() Manager { + return &manager{ + scheduleDao: dao.New(), + } +} + +type manager struct { + scheduleDao dao.ScheduleDao +} + +func (m *manager) Create(schedule *model.Schedule) (int64, error) { + return m.scheduleDao.Create(schedule) +} + +func (m *manager) Update(schedule *model.Schedule, props ...string) error { + return m.scheduleDao.Update(schedule, props...) +} + +func (m *manager) Delete(id int64) error { + return m.scheduleDao.Delete(id) +} + +func (m *manager) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) { + return m.scheduleDao.List(query...) +} + +func (m *manager) Get(id int64) (*model.Schedule, error) { + return m.scheduleDao.Get(id) +} diff --git a/src/pkg/scheduler/manager_test.go b/src/pkg/scheduler/manager_test.go new file mode 100644 index 000000000..b9f59b358 --- /dev/null +++ b/src/pkg/scheduler/manager_test.go @@ -0,0 +1,110 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "testing" + + "github.com/goharbor/harbor/src/pkg/scheduler/model" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +var mgr *manager + +type fakeScheduleDao struct { + schedules []*model.Schedule + mock.Mock +} + +func (f *fakeScheduleDao) Create(*model.Schedule) (int64, error) { + f.Called() + return 1, nil +} +func (f *fakeScheduleDao) Update(*model.Schedule, ...string) error { + f.Called() + return nil +} +func (f *fakeScheduleDao) Delete(int64) error { + f.Called() + return nil +} +func (f *fakeScheduleDao) Get(int64) (*model.Schedule, error) { + f.Called() + return nil, nil +} +func (f *fakeScheduleDao) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) { + f.Called() + if len(query) == 0 || query[0] == nil { + return f.schedules, nil + } + result := []*model.Schedule{} + for _, sch := range f.schedules { + if sch.JobID == query[0].JobID { + result = append(result, sch) + } + } + return result, nil +} + +type managerTestSuite struct { + suite.Suite +} + +func (m *managerTestSuite) SetupTest() { + // recreate schedule manager + mgr = &manager{ + scheduleDao: &fakeScheduleDao{}, + } +} + +func (m *managerTestSuite) TestCreate() { + t := m.T() + mgr.scheduleDao.(*fakeScheduleDao).On("Create", mock.Anything) + mgr.Create(nil) + mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Create") +} + +func (m *managerTestSuite) TestUpdate() { + t := m.T() + mgr.scheduleDao.(*fakeScheduleDao).On("Update", mock.Anything) + mgr.Update(nil) + 
mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Update") +} + +func (m *managerTestSuite) TestDelete() { + t := m.T() + mgr.scheduleDao.(*fakeScheduleDao).On("Delete", mock.Anything) + mgr.Delete(1) + mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Delete") +} + +func (m *managerTestSuite) TestGet() { + t := m.T() + mgr.scheduleDao.(*fakeScheduleDao).On("Get", mock.Anything) + mgr.Get(1) + mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Get") +} + +func (m *managerTestSuite) TestList() { + t := m.T() + mgr.scheduleDao.(*fakeScheduleDao).On("List", mock.Anything) + mgr.List(nil) + mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "List") +} + +func TestManager(t *testing.T) { + suite.Run(t, &managerTestSuite{}) +} diff --git a/src/pkg/scheduler/model/schedule.go b/src/pkg/scheduler/model/schedule.go new file mode 100644 index 000000000..3cdbe0a68 --- /dev/null +++ b/src/pkg/scheduler/model/schedule.go @@ -0,0 +1,40 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "time" + + "github.com/astaxie/beego/orm" +) + +func init() { + orm.RegisterModel( + new(Schedule)) +} + +// Schedule is a record for a scheduler job +type Schedule struct { + ID int64 `orm:"pk;auto;column(id)" json:"id"` + JobID string `orm:"column(job_id)" json:"job_id"` + Status string `orm:"column(status)" json:"status"` + CreationTime *time.Time `orm:"column(creation_time)" json:"creation_time"` + UpdateTime *time.Time `orm:"column(update_time)" json:"update_time"` +} + +// ScheduleQuery is query for schedule +type ScheduleQuery struct { + JobID string +} diff --git a/src/pkg/scheduler/periodic_job.go b/src/pkg/scheduler/periodic_job.go new file mode 100644 index 000000000..f52f47af5 --- /dev/null +++ b/src/pkg/scheduler/periodic_job.go @@ -0,0 +1,54 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scheduler + +import ( + "encoding/json" + + "github.com/goharbor/harbor/src/jobservice/job" +) + +// const definitions +const ( + // the job name that used to register to Jobservice + JobNameScheduler = "SCHEDULER" +) + +// PeriodicJob is designed to generate hook event periodically +type PeriodicJob struct{} + +// MaxFails of the job +func (pj *PeriodicJob) MaxFails() uint { + return 3 +} + +// ShouldRetry indicates job can be retried if failed +func (pj *PeriodicJob) ShouldRetry() bool { + return true +} + +// Validate the parameters +func (pj *PeriodicJob) Validate(params job.Parameters) error { + return nil +} + +// Run the job +func (pj *PeriodicJob) Run(ctx job.Context, params job.Parameters) error { + data, err := json.Marshal(params) + if err != nil { + return err + } + return ctx.Checkin(string(data)) +} diff --git a/src/pkg/scheduler/scheduler.go b/src/pkg/scheduler/scheduler.go new file mode 100644 index 000000000..6fb7d7e87 --- /dev/null +++ b/src/pkg/scheduler/scheduler.go @@ -0,0 +1,208 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scheduler + +import ( + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + chttp "github.com/goharbor/harbor/src/common/http" + "github.com/goharbor/harbor/src/common/job" + "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/common/utils/log" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/pkg/scheduler/model" + "github.com/pkg/errors" +) + +// const definitions +const ( + JobParamCallbackFunc = "callback_func" + JobParamCallbackFuncParams = "params" +) + +var ( + // GlobalScheduler is an instance of the default scheduler that + // can be used globally. Call Init() to initialize it first + GlobalScheduler Scheduler + registry = make(map[string]CallbackFunc) +) + +// CallbackFunc defines the function that the scheduler calls when triggered +type CallbackFunc func(interface{}) error + +// Scheduler provides the capability to run a periodic task, a callback function +// needs to be registered before using the scheduler +// The "params" is passed to the callback function specified by "callbackFuncName" +// as encoded json string, so the callback function must decode it before using +type Scheduler interface { + Schedule(cron string, callbackFuncName string, params interface{}) (int64, error) + UnSchedule(id int64) error +} + +// Register the callback function with name, and the function will be called +// by the scheduler when the scheduler is triggered +func Register(name string, callbackFunc CallbackFunc) error { + if len(name) == 0 { + return errors.New("empty name") + } + if callbackFunc == nil { + return errors.New("callback function is nil") + } + + _, exist := registry[name] + if exist { + return fmt.Errorf("callback function %s already exists", name) + } + registry[name] = callbackFunc + + return nil +} + +// GetCallbackFunc returns the registered callback function specified by the name +func GetCallbackFunc(name string) (CallbackFunc, error) { + f, exist := registry[name] + 
if !exist { + return nil, fmt.Errorf("callback function %s not found", name) + } + return f, nil +} + +func callbackFuncExist(name string) bool { + _, exist := registry[name] + return exist +} + +// Init the GlobalScheduler +func Init() { + GlobalScheduler = New(config.InternalCoreURL()) +} + +// New returns an instance of the default scheduler +func New(internalCoreURL string) Scheduler { + return &scheduler{ + internalCoreURL: internalCoreURL, + jobserviceClient: job.GlobalClient, + manager: GlobalManager, + } +} + +type scheduler struct { + sync.RWMutex + internalCoreURL string + manager Manager + jobserviceClient job.Client +} + +func (s *scheduler) Schedule(cron string, callbackFuncName string, params interface{}) (int64, error) { + if !callbackFuncExist(callbackFuncName) { + return 0, fmt.Errorf("callback function %s not found", callbackFuncName) + } + + // create schedule record + now := time.Now() + scheduleID, err := s.manager.Create(&model.Schedule{ + CreationTime: &now, + UpdateTime: &now, + }) + if err != nil { + return 0, err + } + // if got error in the following steps, delete the schedule record in database + defer func() { + if err != nil { + e := s.manager.Delete(scheduleID) + if e != nil { + log.Errorf("failed to delete the schedule %d: %v", scheduleID, e) + } + } + }() + log.Debugf("the schedule record %d created", scheduleID) + + // submit scheduler job to Jobservice + statusHookURL := fmt.Sprintf("%s/service/notifications/schedules/%d", s.internalCoreURL, scheduleID) + jd := &models.JobData{ + Name: JobNameScheduler, + Parameters: map[string]interface{}{ + JobParamCallbackFunc: callbackFuncName, + }, + Metadata: &models.JobMetadata{ + JobKind: job.JobKindPeriodic, + Cron: cron, + }, + StatusHook: statusHookURL, + } + if params != nil { + var paramsData []byte + paramsData, err = json.Marshal(params) + if err != nil { + return 0, err + } + jd.Parameters[JobParamCallbackFuncParams] = string(paramsData) + } + jobID, err := 
s.jobserviceClient.SubmitJob(jd) + if err != nil { + return 0, err + } + // if got error in the following steps, stop the scheduler job + defer func() { + if err != nil { + if e := s.jobserviceClient.PostAction(jobID, job.JobActionStop); e != nil { + log.Errorf("failed to stop the scheduler job %s: %v", jobID, e) + } + } + }() + log.Debugf("the scheduler job submitted to Jobservice, job ID: %s", jobID) + + // populate the job ID for the schedule + err = s.manager.Update(&model.Schedule{ + ID: scheduleID, + JobID: jobID, + }, "JobID") + if err != nil { + return 0, err + } + + return scheduleID, nil +} + +func (s *scheduler) UnSchedule(id int64) error { + schedule, err := s.manager.Get(id) + if err != nil { + return err + } + if schedule == nil { + log.Warningf("the schedule record %d not found", id) + return nil + } + if err = s.jobserviceClient.PostAction(schedule.JobID, job.JobActionStop); err != nil { + herr, ok := err.(*chttp.Error) + // if the job specified by jobID is not found in Jobservice, just delete + // the schedule record + if !ok || herr.Code != http.StatusNotFound { + return err + } + } + log.Debugf("the stop action for job %s submitted to the Jobservice", schedule.JobID) + if err = s.manager.Delete(schedule.ID); err != nil { + return err + } + log.Debugf("the schedule record %d deleted", schedule.ID) + + return nil +} diff --git a/src/pkg/scheduler/scheduler_test.go b/src/pkg/scheduler/scheduler_test.go new file mode 100644 index 000000000..de4bb2a2a --- /dev/null +++ b/src/pkg/scheduler/scheduler_test.go @@ -0,0 +1,115 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "testing" + + htesting "github.com/goharbor/harbor/src/testing" + "github.com/goharbor/harbor/src/testing/job" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +var sch *scheduler + +type schedulerTestSuite struct { + suite.Suite +} + +func (s *schedulerTestSuite) SetupTest() { + t := s.T() + // empty callback function registry before running every test case + // and register a new callback function named "callback" + registry = make(map[string]CallbackFunc) + err := Register("callback", func(interface{}) error { return nil }) + require.Nil(t, err) + + // recreate the scheduler object + sch = &scheduler{ + jobserviceClient: &job.MockJobClient{}, + manager: &htesting.FakeSchedulerManager{}, + } +} + +func (s *schedulerTestSuite) TestRegister() { + t := s.T() + var name string + var callbackFun CallbackFunc + + // empty name + err := Register(name, callbackFun) + require.NotNil(t, err) + + // nil callback function + name = "test" + err = Register(name, callbackFun) + require.NotNil(t, err) + + // pass + callbackFun = func(interface{}) error { return nil } + err = Register(name, callbackFun) + require.Nil(t, err) + + // duplicate name + err = Register(name, callbackFun) + require.NotNil(t, err) +} + +func (s *schedulerTestSuite) TestGetCallbackFunc() { + t := s.T() + // not exist + _, err := GetCallbackFunc("not-exist") + require.NotNil(t, err) + + // pass + f, err := GetCallbackFunc("callback") + require.Nil(t, err) + assert.NotNil(t, f) +} + 
+func (s *schedulerTestSuite) TestSchedule() { + t := s.T() + + // callback function not exist + _, err := sch.Schedule("0 * * * * *", "not-exist", nil) + require.NotNil(t, err) + + // pass + id, err := sch.Schedule("0 * * * * *", "callback", nil) + require.Nil(t, err) + assert.Equal(t, int64(1), id) +} + +func (s *schedulerTestSuite) TestUnSchedule() { + t := s.T() + // schedule not exist + err := sch.UnSchedule(1) + require.NotNil(t, err) + + // schedule exist + id, err := sch.Schedule("0 * * * * *", "callback", nil) + require.Nil(t, err) + assert.Equal(t, int64(1), id) + + err = sch.UnSchedule(id) + require.Nil(t, err) +} + +func TestScheduler(t *testing.T) { + s := &schedulerTestSuite{} + suite.Run(t, s) +} diff --git a/src/pkg/types/format.go b/src/pkg/types/format.go new file mode 100644 index 000000000..cc97f0764 --- /dev/null +++ b/src/pkg/types/format.go @@ -0,0 +1,40 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "fmt" +) + +var ( + resourceValueFormats = map[ResourceName]func(int64) string{ + ResourceStorage: byteCountToDisplaySize, + } +) + +func byteCountToDisplaySize(value int64) string { + const unit = 1024 + if value < unit { + return fmt.Sprintf("%d B", value) + } + + div, exp := int64(unit), 0 + for n := value / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + + return fmt.Sprintf("%.1f %ciB", float64(value)/float64(div), "KMGTPE"[exp]) +} diff --git a/src/pkg/types/format_test.go b/src/pkg/types/format_test.go new file mode 100644 index 000000000..19f6607eb --- /dev/null +++ b/src/pkg/types/format_test.go @@ -0,0 +1,45 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import "testing" + +func Test_byteCountToDisplaySize(t *testing.T) { + type args struct { + value int64 + } + tests := []struct { + name string + args args + want string + }{ + {"100 B", args{100}, "100 B"}, + {"1.0 KiB", args{1024}, "1.0 KiB"}, + {"1.5 KiB", args{1024 * 3 / 2}, "1.5 KiB"}, + {"1.0 MiB", args{1024 * 1024}, "1.0 MiB"}, + {"1.5 MiB", args{1024 * 1024 * 3 / 2}, "1.5 MiB"}, + {"1.0 GiB", args{1024 * 1024 * 1024}, "1.0 GiB"}, + {"1.5 GiB", args{1024 * 1024 * 1024 * 3 / 2}, "1.5 GiB"}, + {"1.0 TiB", args{1024 * 1024 * 1024 * 1024}, "1.0 TiB"}, + {"1.5 TiB", args{1024 * 1024 * 1024 * 1024 * 3 / 2}, "1.5 TiB"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := byteCountToDisplaySize(tt.args.value); got != tt.want { + t.Errorf("byteCountToDisplaySize() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/pkg/types/resources.go b/src/pkg/types/resources.go new file mode 100644 index 000000000..95a98fdff --- /dev/null +++ b/src/pkg/types/resources.go @@ -0,0 +1,137 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "encoding/json" + "strconv" +) + +const ( + // UNLIMITED unlimited resource value + UNLIMITED = -1 + + // ResourceCount count, in number + ResourceCount ResourceName = "count" + // ResourceStorage storage size, in bytes + ResourceStorage ResourceName = "storage" +) + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// FormatValue returns string for the resource value +func (resource ResourceName) FormatValue(value int64) string { + format, ok := resourceValueFormats[resource] + if ok { + return format(value) + } + + return strconv.FormatInt(value, 10) +} + +// ResourceList is a set of (resource name, value) pairs. +type ResourceList map[ResourceName]int64 + +func (resources ResourceList) String() string { + bytes, _ := json.Marshal(resources) + return string(bytes) +} + +// NewResourceList returns resource list from string +func NewResourceList(s string) (ResourceList, error) { + var resources ResourceList + if err := json.Unmarshal([]byte(s), &resources); err != nil { + return nil, err + } + + return resources, nil +} + +// Equals returns true if the two lists are equivalent +func Equals(a ResourceList, b ResourceList) bool { + if len(a) != len(b) { + return false + } + + for key, value1 := range a { + value2, found := b[key] + if !found { + return false + } + if value1 != value2 { + return false + } + } + + return true +} + +// Add returns the result of a + b for each named resource +func Add(a ResourceList, b ResourceList) ResourceList { + result := ResourceList{} + for key, value := range a { + if other, found := b[key]; found { + value = value + other + } + result[key] = value + } + + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value + } + } + return result +} + +// Subtract returns the result of a - b for each named resource +func Subtract(a ResourceList, b ResourceList) ResourceList { + result := ResourceList{} + for key, value := 
range a { + if other, found := b[key]; found { + value = value - other + } + result[key] = value + } + + for key, value := range b { + if _, found := result[key]; !found { + result[key] = -value + } + } + + return result +} + +// Zero returns the result of a - a for each named resource +func Zero(a ResourceList) ResourceList { + result := ResourceList{} + for key := range a { + result[key] = 0 + } + return result +} + +// IsNegative returns the set of resource names that have a negative value. +func IsNegative(a ResourceList) []ResourceName { + results := []ResourceName{} + for k, v := range a { + if v < 0 { + results = append(results, k) + } + } + return results +} diff --git a/src/pkg/types/resources_test.go b/src/pkg/types/resources_test.go new file mode 100644 index 000000000..c912c5f3e --- /dev/null +++ b/src/pkg/types/resources_test.go @@ -0,0 +1,86 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "testing" + + "github.com/stretchr/testify/suite" +) + +type ResourcesSuite struct { + suite.Suite +} + +func (suite *ResourcesSuite) TestNewResourceList() { + res1, err1 := NewResourceList("") + suite.Error(err1) + suite.Nil(res1) + suite.Equal(0, len(res1)) + + res2, err2 := NewResourceList("{}") + suite.Nil(err2) + suite.NotNil(res2) +} + +func (suite *ResourcesSuite) TestEquals() { + suite.True(Equals(ResourceList{}, ResourceList{})) + suite.True(Equals(ResourceList{ResourceStorage: 100}, ResourceList{ResourceStorage: 100})) + suite.False(Equals(ResourceList{ResourceStorage: 100}, ResourceList{ResourceStorage: 200})) + suite.False(Equals(ResourceList{ResourceStorage: 100}, ResourceList{ResourceStorage: 100, ResourceCount: 10})) + suite.False(Equals(ResourceList{ResourceStorage: 100, ResourceCount: 10}, ResourceList{ResourceStorage: 100})) +} + +func (suite *ResourcesSuite) TestAdd() { + res1 := ResourceList{ResourceStorage: 100} + res2 := ResourceList{ResourceStorage: 100} + res3 := ResourceList{ResourceStorage: 100, ResourceCount: 10} + res4 := ResourceList{ResourceCount: 10} + + suite.Equal(res1, Add(ResourceList{}, res1)) + suite.Equal(ResourceList{ResourceStorage: 200}, Add(res1, res2)) + suite.Equal(ResourceList{ResourceStorage: 200, ResourceCount: 10}, Add(res1, res3)) + suite.Equal(ResourceList{ResourceStorage: 100, ResourceCount: 10}, Add(res1, res4)) +} + +func (suite *ResourcesSuite) TestSubtract() { + res1 := ResourceList{ResourceStorage: 100} + res2 := ResourceList{ResourceStorage: 100} + res3 := ResourceList{ResourceStorage: 100, ResourceCount: 10} + res4 := ResourceList{ResourceCount: 10} + + suite.Equal(res1, Subtract(res1, ResourceList{})) + suite.Equal(ResourceList{ResourceStorage: 0}, Subtract(res1, res2)) + suite.Equal(ResourceList{ResourceStorage: 0, ResourceCount: -10}, Subtract(res1, res3)) + suite.Equal(ResourceList{ResourceStorage: 100, ResourceCount: -10}, Subtract(res1, res4)) +} + +func (suite 
*ResourcesSuite) TestZero() { + res1 := ResourceList{ResourceStorage: 100} + res2 := ResourceList{ResourceCount: 10, ResourceStorage: 100} + + suite.Equal(ResourceList{}, Zero(ResourceList{})) + suite.Equal(ResourceList{ResourceStorage: 0}, Zero(res1)) + suite.Equal(ResourceList{ResourceStorage: 0, ResourceCount: 0}, Zero(res2)) +} + +func (suite *ResourcesSuite) TestIsNegative() { + suite.EqualValues([]ResourceName{ResourceStorage}, IsNegative(ResourceList{ResourceStorage: -100, ResourceCount: 100})) + suite.EqualValues([]ResourceName{ResourceStorage, ResourceCount}, IsNegative(ResourceList{ResourceStorage: -100, ResourceCount: -100})) +} + +func TestRunResourcesSuite(t *testing.T) { + suite.Run(t, new(ResourcesSuite)) +} diff --git a/src/portal/lib/ng-package.json b/src/portal/lib/ng-package.json index 053ada8db..89852ea86 100644 --- a/src/portal/lib/ng-package.json +++ b/src/portal/lib/ng-package.json @@ -8,6 +8,13 @@ "@ngx-translate/core": "ngx-translate-core", "@ngx-translate/core/index": "ngx-translate-core", "ngx-markdown": "ngx-markdown" - } + }, + "umdModuleIds": { + "@clr/angular" : "angular", + "ngx-markdown" : "ngxMarkdown", + "@ngx-translate/http-loader" : "httpLoader", + "ngx-cookie" : "ngxCookie", + "@ngx-translate/core" : "core$1" + } } } \ No newline at end of file diff --git a/src/portal/lib/ng-package.prod.json b/src/portal/lib/ng-package.prod.json index 464fcabd4..85a87a50d 100644 --- a/src/portal/lib/ng-package.prod.json +++ b/src/portal/lib/ng-package.prod.json @@ -7,6 +7,13 @@ "@ngx-translate/core": "ngx-translate-core", "@ngx-translate/core/index": "ngx-translate-core", "ngx-markdown": "ngx-markdown" - } + }, + "umdModuleIds": { + "@clr/angular" : "angular", + "ngx-markdown" : "ngxMarkdown", + "@ngx-translate/http-loader" : "httpLoader", + "ngx-cookie" : "ngxCookie", + "@ngx-translate/core" : "core$1" + } } } \ No newline at end of file diff --git a/src/portal/lib/package.json b/src/portal/lib/package.json index 4804668db..9c49c4207 100644 
--- a/src/portal/lib/package.json +++ b/src/portal/lib/package.json @@ -1,6 +1,6 @@ { "name": "@harbor/ui", - "version": "1.8.0-rc2", + "version": "1.9.0", "description": "Harbor shared UI components based on Clarity and Angular7", "author": "CNCF", "module": "index.js", diff --git a/src/portal/lib/src/config/config.ts b/src/portal/lib/src/config/config.ts index 2a376d71c..9505b11c5 100644 --- a/src/portal/lib/src/config/config.ts +++ b/src/portal/lib/src/config/config.ts @@ -87,16 +87,19 @@ export class Configuration { token_expiration: NumberValueItem; scan_all_policy: ComplexValueItem; read_only: BoolValueItem; + notification_enable: BoolValueItem; http_authproxy_endpoint?: StringValueItem; http_authproxy_tokenreview_endpoint?: StringValueItem; http_authproxy_verify_cert?: BoolValueItem; - http_authproxy_always_onboard?: BoolValueItem; + http_authproxy_skip_search?: BoolValueItem; oidc_name?: StringValueItem; oidc_endpoint?: StringValueItem; oidc_client_id?: StringValueItem; oidc_client_secret?: StringValueItem; oidc_verify_cert?: BoolValueItem; oidc_scope?: StringValueItem; + count_per_project: NumberValueItem; + storage_per_project: NumberValueItem; public constructor() { this.auth_mode = new StringValueItem("db_auth", true); this.project_creation_restriction = new StringValueItem("everyone", true); @@ -138,15 +141,18 @@ export class Configuration { } }, true); this.read_only = new BoolValueItem(false, true); + this.notification_enable = new BoolValueItem(false, true); this.http_authproxy_endpoint = new StringValueItem("", true); this.http_authproxy_tokenreview_endpoint = new StringValueItem("", true); this.http_authproxy_verify_cert = new BoolValueItem(false, true); - this.http_authproxy_always_onboard = new BoolValueItem(false, true); + this.http_authproxy_skip_search = new BoolValueItem(false, true); this.oidc_name = new StringValueItem('', true); this.oidc_endpoint = new StringValueItem('', true); this.oidc_client_id = new StringValueItem('', true); 
this.oidc_client_secret = new StringValueItem('', true); this.oidc_verify_cert = new BoolValueItem(false, true); this.oidc_scope = new StringValueItem('', true); + this.count_per_project = new NumberValueItem(-1, true); + this.storage_per_project = new NumberValueItem(-1, true); } } diff --git a/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts b/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts index dfcc0c4e8..2a8c55c18 100644 --- a/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts +++ b/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts @@ -4,6 +4,7 @@ import { GcJobViewModel } from "../gcLog"; import { GcViewModelFactory } from "../gc.viewmodel.factory"; import { ErrorHandler } from "../../../error-handler/index"; import { Subscription, timer } from "rxjs"; +import { REFRESH_TIME_DIFFERENCE } from '../../../shared/shared.const'; const JOB_STATUS = { PENDING: "pending", RUNNING: "running" @@ -34,7 +35,7 @@ export class GcHistoryComponent implements OnInit, OnDestroy { this.loading = false; // to avoid some jobs not finished. 
if (!this.timerDelay) { - this.timerDelay = timer(3000, 3000).subscribe(() => { + this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => { let count: number = 0; this.jobs.forEach(job => { if ( diff --git a/src/portal/lib/src/config/index.ts b/src/portal/lib/src/config/index.ts index 5ecae2c6e..a43adbce6 100644 --- a/src/portal/lib/src/config/index.ts +++ b/src/portal/lib/src/config/index.ts @@ -6,6 +6,8 @@ import { VulnerabilityConfigComponent } from './vulnerability/vulnerability-conf import { RegistryConfigComponent } from './registry-config.component'; import { GcComponent } from './gc/gc.component'; import { GcHistoryComponent } from './gc/gc-history/gc-history.component'; +import { ProjectQuotasComponent } from './project-quotas/project-quotas.component'; +import { EditProjectQuotasComponent } from './project-quotas/edit-project-quotas/edit-project-quotas.component'; export * from './config'; export * from './replication/replication-config.component'; @@ -20,5 +22,7 @@ export const CONFIGURATION_DIRECTIVES: Type[] = [ GcComponent, SystemSettingsComponent, VulnerabilityConfigComponent, - RegistryConfigComponent + RegistryConfigComponent, + ProjectQuotasComponent, + EditProjectQuotasComponent ]; diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html new file mode 100644 index 000000000..c9bd48440 --- /dev/null +++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html @@ -0,0 +1,87 @@ + + + + + + \ No newline at end of file diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss new file mode 100644 index 000000000..43f9bf3bc --- /dev/null +++ 
b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss @@ -0,0 +1,84 @@ +::ng-deep .modal-dialog { + width: 25rem; +} + +.modal-body { + padding-top: 0.8rem; + overflow-y: visible; + overflow-x: visible; + + .clr-form-compact { + div.form-group { + padding-left: 8.5rem; + + .mr-3px { + margin-right: 3px; + } + + .quota-input { + width: 2rem; + padding-right: 0.8rem; + } + + .select-div { + width: 2.5rem; + + ::ng-deep .clr-form-control { + margin-top: 0.28rem; + + select { + padding-right: 15px; + } + } + } + } + } + + .clr-form-compact-common { + div.form-group { + padding-left: 6rem; + + .select-div { + width: 1.6rem; + } + } + } +} + +.progress-block { + width: 8rem; +} + +.progress-div { + position: relative; + padding-right: 0.6rem; + width: 9rem; +} + +::ng-deep { + .progress { + &.warning>progress { + color: orange; + + &::-webkit-progress-value { + background-color: orange; + } + + &::-moz-progress-bar { + background-color: orange; + } + } + } +} + +.progress-label { + position: absolute; + right: -2.3rem; + top: 0; + width: 3.5rem; + font-weight: 100; + font-size: 10px; + + overflow: hidden; + text-overflow: ellipsis; +} \ No newline at end of file diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.spec.ts b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.spec.ts new file mode 100644 index 000000000..595f1ab1b --- /dev/null +++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.spec.ts @@ -0,0 +1,37 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { EditProjectQuotasComponent } from './edit-project-quotas.component'; +import { SharedModule } from '../../../shared/shared.module'; +import { InlineAlertComponent } from '../../../inline-alert/inline-alert.component'; +import { SERVICE_CONFIG, IServiceConfig } from '../../../service.config'; 
+import { RouterModule } from '@angular/router'; + +describe('EditProjectQuotasComponent', () => { + let component: EditProjectQuotasComponent; + let fixture: ComponentFixture; + let config: IServiceConfig = { + quotaUrl: "/api/quotas/testing" + }; + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + SharedModule, + RouterModule.forRoot([]) + ], + declarations: [ EditProjectQuotasComponent, InlineAlertComponent ], + providers: [ + { provide: SERVICE_CONFIG, useValue: config }, + ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(EditProjectQuotasComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts new file mode 100644 index 000000000..ca3248b32 --- /dev/null +++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts @@ -0,0 +1,154 @@ +import { + Component, + EventEmitter, + Output, + ViewChild, + OnInit, +} from '@angular/core'; +import { NgForm, Validators } from '@angular/forms'; + +import { InlineAlertComponent } from '../../../inline-alert/inline-alert.component'; + +import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from "../../../shared/shared.const"; + +import { clone, getSuitableUnit, getByte, GetIntegerAndUnit, validateCountLimit, validateLimit } from '../../../utils'; +import { EditQuotaQuotaInterface, QuotaHardLimitInterface } from '../../../service'; +import { distinctUntilChanged } from 'rxjs/operators'; + +@Component({ + selector: 'edit-project-quotas', + templateUrl: './edit-project-quotas.component.html', + styleUrls: ['./edit-project-quotas.component.scss'] +}) +export class 
EditProjectQuotasComponent implements OnInit { + openEditQuota: boolean; + defaultTextsObj: { editQuota: string; setQuota: string; countQuota: string; storageQuota: string; isSystemDefaultQuota: boolean } = { + editQuota: '', + setQuota: '', + countQuota: '', + storageQuota: '', + isSystemDefaultQuota: false, + }; + quotaHardLimitValue: QuotaHardLimitInterface = { + storageLimit: -1 + , storageUnit: '' + , countLimit: -1 + }; + quotaUnits = QuotaUnits; + staticBackdrop = true; + closable = false; + quotaForm: NgForm; + @ViewChild(InlineAlertComponent) + inlineAlert: InlineAlertComponent; + + @ViewChild('quotaForm') + currentForm: NgForm; + @Output() confirmAction = new EventEmitter(); + quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT; + quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT; + constructor() { } + + ngOnInit() { + } + + onSubmit(): void { + const emitData = { + formValue: this.currentForm.value, + isSystemDefaultQuota: this.defaultTextsObj.isSystemDefaultQuota, + id: this.quotaHardLimitValue.id + }; + this.confirmAction.emit(emitData); + } + onCancel() { + this.openEditQuota = false; + } + + openEditQuotaModal(defaultTextsObj: EditQuotaQuotaInterface): void { + this.defaultTextsObj = defaultTextsObj; + if (this.defaultTextsObj.isSystemDefaultQuota) { + this.quotaHardLimitValue = { + storageLimit: defaultTextsObj.quotaHardLimitValue.storageLimit === QuotaUnlimited ? + QuotaUnlimited : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.storageLimit + , clone(QuotaUnits), 0, clone(QuotaUnits)).partNumberHard + , storageUnit: defaultTextsObj.quotaHardLimitValue.storageLimit === QuotaUnlimited ? 
+ QuotaUnits[3].UNIT : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.storageLimit + , clone(QuotaUnits), 0, clone(QuotaUnits)).partCharacterHard + , countLimit: defaultTextsObj.quotaHardLimitValue.countLimit + }; + } else { + this.quotaHardLimitValue = { + storageLimit: defaultTextsObj.quotaHardLimitValue.hard.storage === QuotaUnlimited ? + QuotaUnlimited : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.hard.storage + , clone(QuotaUnits), defaultTextsObj.quotaHardLimitValue.used.storage, clone(QuotaUnits)).partNumberHard + , storageUnit: defaultTextsObj.quotaHardLimitValue.hard.storage === QuotaUnlimited ? + QuotaUnits[3].UNIT : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.hard.storage + , clone(QuotaUnits), defaultTextsObj.quotaHardLimitValue.used.storage, clone(QuotaUnits)).partCharacterHard + , countLimit: defaultTextsObj.quotaHardLimitValue.hard.count + , id: defaultTextsObj.quotaHardLimitValue.id + , countUsed: defaultTextsObj.quotaHardLimitValue.used.count + , storageUsed: defaultTextsObj.quotaHardLimitValue.used.storage + }; + } + let defaultForm = { + count: this.quotaHardLimitValue.countLimit + , storage: this.quotaHardLimitValue.storageLimit + , storageUnit: this.quotaHardLimitValue.storageUnit + }; + this.currentForm.resetForm(defaultForm); + this.openEditQuota = true; + + this.currentForm.form.controls['storage'].setValidators( + [ + Validators.required, + Validators.pattern('(^-1$)|(^([1-9]+)([0-9]+)*$)'), + validateLimit(this.currentForm.form.controls['storageUnit']) + ]); + this.currentForm.form.controls['count'].setValidators( + [ + Validators.required, + Validators.pattern('(^-1$)|(^([1-9]+)([0-9]+)*$)'), + validateCountLimit() + ]); + this.currentForm.form.valueChanges + .pipe(distinctUntilChanged((a, b) => JSON.stringify(a) === JSON.stringify(b))) + .subscribe((data) => { + ['storage', 'storageUnit', 'count'].forEach(fieldName => { + if (this.currentForm.form.get(fieldName) && this.currentForm.form.get(fieldName).value !== 
null) { + this.currentForm.form.get(fieldName).updateValueAndValidity(); + } + }); + }); + } + + get isValid() { + return this.currentForm.valid && this.currentForm.dirty; + } + getSuitableUnit(value) { + const QuotaUnitsCopy = clone(QuotaUnits); + return getSuitableUnit(value, QuotaUnitsCopy); + } + getIntegerAndUnit(valueHard, valueUsed) { + return GetIntegerAndUnit(valueHard + , clone(QuotaUnits), valueUsed, clone(QuotaUnits)); + } + getByte(count: number, unit: string) { + if (+count === +count) { + return getByte(+count, unit); + } + return 0; + } + isDangerColor(limit: number | string, used: number | string, unit?: string) { + if (unit) { + return limit !== QuotaUnlimited ? +used / getByte(+limit, unit) >= this.quotaDangerCoefficient : false; + } + return limit !== QuotaUnlimited ? +used / +limit >= this.quotaDangerCoefficient : false; + } + isWarningColor(limit: number | string, used: number | string, unit?: string) { + if (unit) { + return limit !== QuotaUnlimited ? + +used / getByte(+limit, unit) >= this.quotaWarningCoefficient && +used / getByte(+limit, unit) <= this.quotaDangerCoefficient : false; + } + return limit !== QuotaUnlimited ? + +used / +limit >= this.quotaWarningCoefficient && +used / +limit <= this.quotaDangerCoefficient : false; + } +} diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.html b/src/portal/lib/src/config/project-quotas/project-quotas.component.html new file mode 100644 index 000000000..22af1333d --- /dev/null +++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.html @@ -0,0 +1,79 @@ +
+
+
+
+
+
{{'QUOTA.PROJECT_QUOTA_DEFAULT_ARTIFACT' | translate}}{{ quotaHardLimitValue?.countLimit === -1? ('QUOTA.UNLIMITED'| translate): quotaHardLimitValue?.countLimit }} +
+
{{'QUOTA.PROJECT_QUOTA_DEFAULT_DISK' | translate}} + {{ quotaHardLimitValue?.storageLimit === -1?('QUOTA.UNLIMITED' | translate): getIntegerAndUnit(quotaHardLimitValue?.storageLimit, 0).partNumberHard}} + {{ quotaHardLimitValue?.storageLimit === -1?'':quotaHardLimitValue?.storageUnit }} +
+
+ +
+
+ + + +
+
+
+ + {{'QUOTA.PROJECT' | translate}} + {{'QUOTA.OWNER' | translate}} + {{'QUOTA.COUNT' | translate }} + {{'QUOTA.STORAGE' | translate }} + {{'QUOTA.PLACEHOLDER' | translate }} + + + + + + {{quota?.ref?.name}} + {{quota?.ref?.owner_name}} + +
+
+ +
+ +
+
+ +
+
+ +
+ +
+
+
+ + {{pagination.firstItem + 1}} - {{pagination.lastItem + 1}} + {{'DESTINATION.OF' | translate}} + {{totalCount}} {{'SUMMARY.QUOTAS' | translate}} + + +
+
+
+ +
\ No newline at end of file diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.scss b/src/portal/lib/src/config/project-quotas/project-quotas.component.scss new file mode 100644 index 000000000..eeb09db91 --- /dev/null +++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.scss @@ -0,0 +1,42 @@ +.default-quota { + display: flex; + + .default-quota-text { + display: flex; + justify-content: space-between; + min-width: 13rem; + + .num-count { + display: inline-block; + min-width: 2rem; + } + } +} + +.color-0 { + color: #000; +} + +.progress-block { + label { + font-weight: 400 !important; + } +} + +.default-quota-edit-button { + height: 1rem; +} + +.min-label-width { + min-width: 120px; +} + +.quota-top { + display: flex; + justify-content: space-between; +} + +.refresh-div { + margin-top: auto; + cursor: pointer; +} \ No newline at end of file diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.spec.ts b/src/portal/lib/src/config/project-quotas/project-quotas.component.spec.ts new file mode 100644 index 000000000..168685550 --- /dev/null +++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.spec.ts @@ -0,0 +1,93 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { ProjectQuotasComponent } from './project-quotas.component'; +import { IServiceConfig, SERVICE_CONFIG } from '../../service.config'; +import { SharedModule } from '../../shared/shared.module'; +import { RouterModule } from '@angular/router'; +import { EditProjectQuotasComponent } from './edit-project-quotas/edit-project-quotas.component'; +import { InlineAlertComponent } from '../../inline-alert/inline-alert.component'; +import { + ConfigurationService, ConfigurationDefaultService, QuotaService + , QuotaDefaultService, Quota, RequestQueryParams +} from '../../service'; +import { ErrorHandler } from '../../error-handler'; +import { of } from 'rxjs'; +import { delay } from 
'rxjs/operators'; +import {APP_BASE_HREF} from '@angular/common'; +describe('ProjectQuotasComponent', () => { + let spy: jasmine.Spy; + let quotaService: QuotaService; + + let component: ProjectQuotasComponent; + let fixture: ComponentFixture; + + let config: IServiceConfig = { + quotaUrl: "/api/quotas/testing" + }; + let mockQuotaList: Quota[] = [{ + id: 1111, + ref: { + id: 1111, + name: "project1", + owner_name: "project1" + }, + creation_time: "12212112121", + update_time: "12212112121", + hard: { + count: -1, + storage: -1, + }, + used: { + count: 1234, + storage: 1234 + }, + } + ]; + beforeEach(async(() => { + TestBed.configureTestingModule({ + imports: [ + SharedModule, + RouterModule.forRoot([]) + ], + declarations: [ProjectQuotasComponent, EditProjectQuotasComponent, InlineAlertComponent], + providers: [ + ErrorHandler, + { provide: SERVICE_CONFIG, useValue: config }, + { provide: ConfigurationService, useClass: ConfigurationDefaultService }, + { provide: QuotaService, useClass: QuotaDefaultService }, + { provide: APP_BASE_HREF, useValue : '/' } + + ] + }) + .compileComponents(); + })); + + beforeEach(async(() => { + + fixture = TestBed.createComponent(ProjectQuotasComponent); + component = fixture.componentInstance; + component.quotaHardLimitValue = { + countLimit: 1111, + storageLimit: 23, + storageUnit: 'GB' + }; + component.loading = true; + quotaService = fixture.debugElement.injector.get(QuotaService); + spy = spyOn(quotaService, 'getQuotaList') + .and.callFake(function (params: RequestQueryParams) { + let header = new Map(); + header.set("X-Total-Count", 123); + const httpRes = { + headers: header, + body: mockQuotaList + }; + return of(httpRes).pipe(delay(0)); + }); + + fixture.detectChanges(); + })); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.ts b/src/portal/lib/src/config/project-quotas/project-quotas.component.ts new file mode 100644 
index 000000000..fa457b03e --- /dev/null +++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.ts @@ -0,0 +1,241 @@ +import { Component, Input, Output, EventEmitter, ViewChild, SimpleChanges, OnChanges } from '@angular/core'; +import { Configuration } from '../config'; +import { + Quota, State, Comparator, ClrDatagridComparatorInterface, QuotaHardLimitInterface, QuotaHard +} from '../../service/interface'; +import { + clone, isEmpty, getChanges, getSuitableUnit, calculatePage, CustomComparator + , getByte, GetIntegerAndUnit +} from '../../utils'; +import { ErrorHandler } from '../../error-handler/index'; +import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from '../../shared/shared.const'; +import { EditProjectQuotasComponent } from './edit-project-quotas/edit-project-quotas.component'; +import { + ConfigurationService +} from '../../service/index'; +import { TranslateService } from '@ngx-translate/core'; +import { forkJoin } from 'rxjs'; +import { QuotaService } from "../../service/quota.service"; +import { Router } from '@angular/router'; +import { finalize } from 'rxjs/operators'; +const quotaSort = { + count: 'used.count', + storage: "used.storage", + sortType: 'string' +}; +const QuotaType = 'project'; + +@Component({ + selector: 'project-quotas', + templateUrl: './project-quotas.component.html', + styleUrls: ['./project-quotas.component.scss'] +}) +export class ProjectQuotasComponent implements OnChanges { + + config: Configuration = new Configuration(); + @ViewChild('editProjectQuotas') + editQuotaDialog: EditProjectQuotasComponent; + loading = true; + quotaHardLimitValue: QuotaHardLimitInterface; + currentState: State; + + @Output() configChange: EventEmitter = new EventEmitter(); + @Output() refreshAllconfig: EventEmitter = new EventEmitter(); + quotaList: Quota[] = []; + originalConfig: Configuration; + currentPage = 1; + totalCount = 0; + pageSize = 15; + quotaDangerCoefficient: number = 
QUOTA_DANGER_COEFFICIENT; + quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT; + @Input() + get allConfig(): Configuration { + return this.config; + } + set allConfig(cfg: Configuration) { + this.config = cfg; + this.configChange.emit(this.config); + } + countComparator: Comparator = new CustomComparator(quotaSort.count, quotaSort.sortType); + storageComparator: Comparator = new CustomComparator(quotaSort.storage, quotaSort.sortType); + + constructor( + private configService: ConfigurationService, + private quotaService: QuotaService, + private translate: TranslateService, + private router: Router, + private errorHandler: ErrorHandler) { } + + editQuota(quotaHardLimitValue: Quota) { + const defaultTexts = [this.translate.get('QUOTA.EDIT_PROJECT_QUOTAS') + , this.translate.get('QUOTA.SET_QUOTAS', { params: quotaHardLimitValue.ref.name }) + , this.translate.get('QUOTA.COUNT_QUOTA'), this.translate.get('QUOTA.STORAGE_QUOTA')]; + forkJoin(...defaultTexts).subscribe(res => { + const defaultTextsObj = { + editQuota: res[0], + setQuota: res[1], + countQuota: res[2], + storageQuota: res[3], + quotaHardLimitValue: quotaHardLimitValue, + isSystemDefaultQuota: false + }; + this.editQuotaDialog.openEditQuotaModal(defaultTextsObj); + }); + } + + editDefaultQuota(quotaHardLimitValue: QuotaHardLimitInterface) { + const defaultTexts = [this.translate.get('QUOTA.EDIT_DEFAULT_PROJECT_QUOTAS'), this.translate.get('QUOTA.SET_DEFAULT_QUOTAS') + , this.translate.get('QUOTA.COUNT_DEFAULT_QUOTA'), this.translate.get('QUOTA.STORAGE_DEFAULT_QUOTA')]; + forkJoin(...defaultTexts).subscribe(res => { + const defaultTextsObj = { + editQuota: res[0], + setQuota: res[1], + countQuota: res[2], + storageQuota: res[3], + quotaHardLimitValue: quotaHardLimitValue, + isSystemDefaultQuota: true + }; + this.editQuotaDialog.openEditQuotaModal(defaultTextsObj); + + }); + } + public getChanges() { + let allChanges = getChanges(this.originalConfig, this.config); + if (allChanges) { + return 
this.getQuotaChanges(allChanges); + } + return null; + } + + getQuotaChanges(allChanges) { + let changes = {}; + for (let prop in allChanges) { + if (prop === 'storage_per_project' + || prop === 'count_per_project' + ) { + changes[prop] = allChanges[prop]; + } + } + return changes; + } + + public saveConfig(configQuota): void { + this.allConfig.count_per_project.value = configQuota.count; + this.allConfig.storage_per_project.value = +configQuota.storage === QuotaUnlimited ? + configQuota.storage : getByte(configQuota.storage, configQuota.storageUnit); + let changes = this.getChanges(); + if (!isEmpty(changes)) { + this.loading = true; + this.configService.saveConfigurations(changes) + .pipe(finalize(() => { + this.loading = false; + this.editQuotaDialog.openEditQuota = false; + })) + .subscribe(response => { + this.refreshAllconfig.emit(); + this.errorHandler.info('CONFIG.SAVE_SUCCESS'); + } + , error => { + this.errorHandler.error(error); + }); + } else { + // Inprop situation, should not come here + this.translate.get('CONFIG.NO_CHANGE').subscribe(res => { + this.editQuotaDialog.inlineAlert.showInlineError(res); + }); + } + } + + confirmEdit(event) { + if (event.isSystemDefaultQuota) { + this.saveConfig(event.formValue); + } else { + this.saveCurrentQuota(event); + } + } + saveCurrentQuota(event) { + let count = +event.formValue.count; + let storage = +event.formValue.storage === QuotaUnlimited ? + +event.formValue.storage : getByte(+event.formValue.storage, event.formValue.storageUnit); + let rep: QuotaHard = { hard: { count, storage } }; + this.loading = true; + this.quotaService.updateQuota(event.id, rep).subscribe(res => { + this.editQuotaDialog.openEditQuota = false; + this.getQuotaList(this.currentState); + this.errorHandler.info('QUOTA.SAVE_SUCCESS'); + }, error => { + this.errorHandler.error(error); + this.loading = false; + }); + } + + getquotaHardLimitValue() { + const storageNumberAndUnit = this.allConfig.storage_per_project ? 
this.allConfig.storage_per_project.value : QuotaUnlimited; + const storageLimit = storageNumberAndUnit; + const storageUnit = this.getIntegerAndUnit(storageNumberAndUnit, 0).partCharacterHard; + const countLimit = this.allConfig.count_per_project ? this.allConfig.count_per_project.value : QuotaUnlimited; + this.quotaHardLimitValue = { storageLimit, storageUnit, countLimit }; + } + getQuotaList(state: State) { + if (!state || !state.page) { + return; + } + // Keep state for future filtering and sorting + this.currentState = state; + + let pageNumber: number = calculatePage(state); + if (pageNumber <= 0) { pageNumber = 1; } + let sortBy: any = ''; + if (state.sort) { + sortBy = state.sort.by as string | ClrDatagridComparatorInterface; + sortBy = sortBy.fieldName ? sortBy.fieldName : sortBy; + sortBy = state.sort.reverse ? `-${sortBy}` : sortBy; + } + this.loading = true; + + this.quotaService.getQuotaList(QuotaType, pageNumber, this.pageSize, sortBy).pipe(finalize(() => { + this.loading = false; + })).subscribe(res => { + if (res.headers) { + let xHeader: string = res.headers.get("X-Total-Count"); + if (xHeader) { + this.totalCount = parseInt(xHeader, 0); + } + } + this.quotaList = res.body.filter((quota) => { + return quota.ref !== null; + }) as Quota[]; + }, error => { + this.errorHandler.error(error); + }); + } + ngOnChanges(changes: SimpleChanges): void { + if (changes && changes["allConfig"]) { + this.originalConfig = clone(this.config); + this.getquotaHardLimitValue(); + } + } + getSuitableUnit(value) { + const QuotaUnitsCopy = clone(QuotaUnits); + return getSuitableUnit(value, QuotaUnitsCopy); + } + getIntegerAndUnit(valueHard, valueUsed) { + return GetIntegerAndUnit(valueHard + , clone(QuotaUnits), valueUsed, clone(QuotaUnits)); + } + + goToLink(proId) { + let linkUrl = ["harbor", "projects", proId, "summary"]; + this.router.navigate(linkUrl); + } + refresh() { + const state: State = { + page: { + from: 0, + to: 14, + size: 15 + }, + }; + 
this.getQuotaList(state); + } +} diff --git a/src/portal/lib/src/config/registry-config.component.spec.ts b/src/portal/lib/src/config/registry-config.component.spec.ts index d938115da..9aef842d4 100644 --- a/src/portal/lib/src/config/registry-config.component.spec.ts +++ b/src/portal/lib/src/config/registry-config.component.spec.ts @@ -19,7 +19,7 @@ import { ScanningResultDefaultService, SystemInfoService, SystemInfoDefaultService, - SystemInfo + SystemInfo, SystemCVEWhitelist } from '../service/index'; import { Configuration } from './config'; import { of } from 'rxjs'; @@ -56,7 +56,12 @@ describe('RegistryConfigComponent (inline template)', () => { "harbor_version": "v1.1.1-rc1-160-g565110d", "next_scan_all": 0 }; - + let mockSystemWhitelist: SystemCVEWhitelist = { + "expires_at": 1561996800, + "id": 1, + "items": [], + "project_id": 0 + }; beforeEach(async(() => { TestBed.configureTestingModule({ imports: [ @@ -90,7 +95,7 @@ describe('RegistryConfigComponent (inline template)', () => { systemInfoService = fixture.debugElement.injector.get(SystemInfoService); spy = spyOn(cfgService, 'getConfigurations').and.returnValue(of(mockConfig)); spySystemInfo = spyOn(systemInfoService, 'getSystemInfo').and.returnValue(of(mockSystemInfo)); - + spySystemInfo = spyOn(systemInfoService, 'getSystemWhitelist').and.returnValue(of(mockSystemWhitelist)); fixture.detectChanges(); }); diff --git a/src/portal/lib/src/config/registry-config.component.ts b/src/portal/lib/src/config/registry-config.component.ts index 66e2f3d3e..28a9252e8 100644 --- a/src/portal/lib/src/config/registry-config.component.ts +++ b/src/portal/lib/src/config/registry-config.component.ts @@ -12,9 +12,10 @@ import { clone } from '../utils'; import { ErrorHandler } from '../error-handler/index'; -import { SystemSettingsComponent, VulnerabilityConfigComponent, GcComponent} from './index'; import { Configuration } from './config'; -import { map, catchError } from "rxjs/operators"; +import { 
VulnerabilityConfigComponent } from "./vulnerability/vulnerability-config.component"; +import { GcComponent } from "./gc"; +import { SystemSettingsComponent } from "./system/system-settings.component"; @Component({ selector: 'hbr-registry-config', diff --git a/src/portal/lib/src/config/system/system-settings.component.html b/src/portal/lib/src/config/system/system-settings.component.html index e5bef3026..72b9458f6 100644 --- a/src/portal/lib/src/config/system/system-settings.component.html +++ b/src/portal/lib/src/config/system/system-settings.component.html @@ -4,23 +4,27 @@
-
- + {{'CONFIG.TOOLTIP.PRO_CREATION_RESTRICTION' | translate}}
-
-
\ No newline at end of file diff --git a/src/portal/src/app/project/project.component.ts b/src/portal/src/app/project/project.component.ts index 10ff031e1..948f3e602 100644 --- a/src/portal/src/app/project/project.component.ts +++ b/src/portal/src/app/project/project.component.ts @@ -15,6 +15,9 @@ import { Component, OnInit, ViewChild } from '@angular/core'; import { CreateProjectComponent } from './create-project/create-project.component'; import { ListProjectComponent } from './list-project/list-project.component'; import { ProjectTypes } from '../shared/shared.const'; +import { ConfigurationService } from '../config/config.service'; +import { Configuration, QuotaHardInterface } from '@harbor/ui'; +import { SessionService } from "../shared/session.service"; @Component({ selector: 'project', @@ -23,7 +26,7 @@ import { ProjectTypes } from '../shared/shared.const'; }) export class ProjectComponent implements OnInit { projectTypes = ProjectTypes; - + quotaObj: QuotaHardInterface; @ViewChild(CreateProjectComponent) creationProject: CreateProjectComponent; @@ -45,16 +48,33 @@ export class ProjectComponent implements OnInit { } } - constructor() { - } + constructor( + public configService: ConfigurationService, + private session: SessionService + ) { } ngOnInit(): void { if (window.sessionStorage && window.sessionStorage['projectTypeValue'] && window.sessionStorage['fromDetails']) { this.currentFilteredType = +window.sessionStorage['projectTypeValue']; window.sessionStorage.removeItem('fromDetails'); } + if (this.isSystemAdmin) { + this.getConfigration(); + } + } + getConfigration() { + this.configService.getConfiguration() + .subscribe((configurations: Configuration) => { + this.quotaObj = { + count_per_project: configurations.count_per_project ? configurations.count_per_project.value : -1, + storage_per_project: configurations.storage_per_project ? 
configurations.storage_per_project.value : -1 + }; + }); + } + public get isSystemAdmin(): boolean { + let account = this.session.getCurrentUser(); + return account != null && account.has_admin_role; } - openModal(): void { this.creationProject.newProject(); } diff --git a/src/portal/src/app/project/project.module.ts b/src/portal/src/app/project/project.module.ts index fb180988f..d29d255f0 100644 --- a/src/portal/src/app/project/project.module.ts +++ b/src/portal/src/app/project/project.module.ts @@ -17,6 +17,7 @@ import { RouterModule } from '@angular/router'; import { SharedModule } from '../shared/shared.module'; import { RepositoryModule } from '../repository/repository.module'; import { ReplicationModule } from '../replication/replication.module'; +import { SummaryModule } from './summary/summary.module'; import { LogModule } from '../log/log.module'; import { ProjectComponent } from './project.component'; @@ -38,6 +39,14 @@ import { ProjectLabelComponent } from "../project/project-label/project-label.co import { HelmChartModule } from './helm-chart/helm-chart.module'; import { RobotAccountComponent } from './robot-account/robot-account.component'; import { AddRobotComponent } from './robot-account/add-robot/add-robot.component'; +import { AddHttpAuthGroupComponent } from './member/add-http-auth-group/add-http-auth-group.component'; +import { TagRetentionComponent } from "./tag-retention/tag-retention.component"; +import { AddRuleComponent } from "./tag-retention/add-rule/add-rule.component"; +import { TagRetentionService } from "./tag-retention/tag-retention.service"; +import { WebhookService } from './webhook/webhook.service'; +import { WebhookComponent } from './webhook/webhook.component'; +import { AddWebhookComponent } from './webhook/add-webhook/add-webhook.component'; +import { AddWebhookFormComponent } from './webhook/add-webhook-form/add-webhook-form.component'; @NgModule({ imports: [ @@ -46,7 +55,8 @@ import { AddRobotComponent } from 
'./robot-account/add-robot/add-robot.component ReplicationModule, LogModule, RouterModule, - HelmChartModule + HelmChartModule, + SummaryModule ], declarations: [ ProjectComponent, @@ -59,10 +69,16 @@ import { AddRobotComponent } from './robot-account/add-robot/add-robot.component ProjectLabelComponent, AddGroupComponent, RobotAccountComponent, - AddRobotComponent + AddRobotComponent, + AddHttpAuthGroupComponent, + TagRetentionComponent, + AddRuleComponent, + WebhookComponent, + AddWebhookComponent, + AddWebhookFormComponent, ], exports: [ProjectComponent, ListProjectComponent], - providers: [ProjectRoutingResolver, MemberService, RobotService] + providers: [ProjectRoutingResolver, MemberService, RobotService, TagRetentionService, WebhookService] }) export class ProjectModule { diff --git a/src/portal/src/app/project/project.ts b/src/portal/src/app/project/project.ts index 7a5df0f96..f6365f244 100644 --- a/src/portal/src/app/project/project.ts +++ b/src/portal/src/app/project/project.ts @@ -51,6 +51,7 @@ export class Project { prevent_vul: string | boolean; severity: string; auto_scan: string | boolean; + retention_id: number; }; constructor () { this.metadata = {}; diff --git a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html index 661a4bd5e..4019db7f4 100644 --- a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html +++ b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html @@ -1,130 +1,134 @@ - - - + -