Merge branch 'master' into log_bugs

jwangyangls 2019-09-03 10:45:58 +08:00 committed by GitHub
commit 1165bd6d0c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1621 changed files with 257711 additions and 29599 deletions

.dockerignore Normal file

@@ -0,0 +1 @@
src/portal/node_modules/


@@ -1,23 +1,23 @@
sudo: true
language: go
go:
- 1.11.2
- 1.12.5
go_import_path: github.com/goharbor/harbor
services:
- docker
dist: trusty
matrix:
include:
- go: 1.11.2
- go: 1.12.5
env:
- UTTEST=true
- go: 1.11.2
- go: 1.12.5
env:
- APITEST_DB=true
- go: 1.11.2
- go: 1.12.5
env:
- APITEST_LDAP=true
- go: 1.11.2
- go: 1.12.5
env:
- OFFLINE=true
env:
@@ -28,7 +28,7 @@ env:
- POSTGRESQL_PWD: root123
- POSTGRESQL_DATABASE: registry
- ADMINSERVER_URL: http://127.0.0.1:8888
- DOCKER_COMPOSE_VERSION: 1.22.0
- DOCKER_COMPOSE_VERSION: 1.23.0
- HARBOR_ADMIN: admin
- HARBOR_ADMIN_PASSWD: Harbor12345
- CORE_SECRET: tempString


@@ -1,5 +1,18 @@
# Changelog
## v1.8.0 (2019-05-21)
[Full list of issues fixed in v1.8.0](https://github.com/goharbor/harbor/issues?q=is%3Aissue+is%3Aclosed+label%3Atarget%2F1.8.0)
* Support for OpenID Connect - OpenID Connect (OIDC) is an authentication layer on top of OAuth 2.0, allowing Harbor to verify the identity of users based on the authentication performed by an external authorization server or identity provider.
* Robot accounts - Robot accounts can be configured to provide administrators with a token that can be granted appropriate permissions for pulling or pushing images. Harbor users can continue operating Harbor using their enterprise SSO credentials, and use robot accounts for CI/CD systems that perform Docker client commands.
* Replication advancements - Harbor's new replication model allows you to replicate your Harbor repository to and from non-Harbor registries. Harbor 1.8 expands on the Harbor-to-Harbor replication feature, adding the ability to replicate resources between Harbor and Docker Hub, Docker Registry, and Huawei Registry. This is enabled through both push and pull mode replication.
* Health check API, showing detailed status and health of all Harbor components.
* Support for defining cron-based scheduled tasks in the Harbor UI. Administrators can now use cron strings to define the schedule of a job. Scan, garbage collection and replication jobs are all supported.
* API explorer integration. End users can now explore and trigger Harbor's API via the Swagger UI nested inside Harbor's UI.
* Introduce a new master role to projects; the role's permissions are greater than developer and less than project admin.
* Introduce harbor.yml as the replacement for harbor.cfg and refactor the prepare script to provide more flexibility to the installation process based on docker-compose.
* Enhancement of the Job Service engine to include webhook events, additional APIs for automation, and numerous bug fixes to improve the stability of the service.
* Docker Registry upgraded to v2.7.1.
## v1.7.5 (2019-04-02)
* Bumped up Clair to v2.0.8
* Fixed issues in supporting windows images. #6992 #6369


@@ -70,7 +70,6 @@ SRCPATH=./src
TOOLSPATH=$(BUILDPATH)/tools
CORE_PATH=$(BUILDPATH)/src/core
PORTAL_PATH=$(BUILDPATH)/src/portal
GOBASEPATH=/go/src/github.com/goharbor
CHECKENVCMD=checkenv.sh
# parameters
@@ -101,14 +100,14 @@ PREPARE_VERSION_NAME=versions
REGISTRYVERSION=v2.7.1-patch-2819
NGINXVERSION=$(VERSIONTAG)
NOTARYVERSION=v0.6.1
CLAIRVERSION=v2.0.8
CLAIRVERSION=v2.0.9
CLAIRDBVERSION=$(VERSIONTAG)
MIGRATORVERSION=$(VERSIONTAG)
REDISVERSION=$(VERSIONTAG)
NOTARYMIGRATEVERSION=v3.5.4
# version of chartmuseum
CHARTMUSEUMVERSION=v0.8.1
CHARTMUSEUMVERSION=v0.9.0
define VERSIONS_FOR_PREPARE
VERSION_TAG: $(VERSIONTAG)
@@ -136,10 +135,10 @@ GOINSTALL=$(GOCMD) install
GOTEST=$(GOCMD) test
GODEP=$(GOTEST) -i
GOFMT=gofmt -w
GOBUILDIMAGE=golang:1.11.2
GOBUILDPATH=$(GOBASEPATH)/harbor
GOBUILDIMAGE=golang:1.12.5
GOBUILDPATH=/harbor
GOIMAGEBUILDCMD=/usr/local/go/bin/go
GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build
GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build -mod vendor
GOBUILDPATH_CORE=$(GOBUILDPATH)/src/core
GOBUILDPATH_JOBSERVICE=$(GOBUILDPATH)/src/jobservice
GOBUILDPATH_REGISTRYCTL=$(GOBUILDPATH)/src/registryctl
@@ -243,7 +242,7 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
$(HARBORPKG)/install.sh \
$(HARBORPKG)/harbor.yml
DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
ifeq ($(NOTARYFLAG), true)
DOCKERSAVE_PARA+= goharbor/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) goharbor/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG)
@@ -271,7 +270,6 @@ check_environment:
compile_core:
@echo "compiling binary for core (golang image)..."
@echo $(GOBASEPATH)
@echo $(GOBUILDPATH)
@$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_CORE) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -o $(GOBUILDMAKEPATH_CORE)/$(CORE_BINARYNAME)
@echo "Done."
@@ -294,7 +292,7 @@ compile_notary_migrate_patch:
compile: check_environment versions_prepare compile_core compile_jobservice compile_registryctl compile_notary_migrate_patch
update_prepare_version:
@echo "substitude the prepare version tag in prepare file..."
@echo "substitute the prepare version tag in prepare file..."
@$(SEDCMD) -i -e 's/goharbor\/prepare:.*[[:space:]]\+/goharbor\/prepare:$(VERSIONTAG) /' $(MAKEPATH)/prepare ;
prepare: update_prepare_version
@@ -414,17 +412,16 @@ pushimage:
start:
@echo "loading harbor images..."
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) up -d
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) up -d
@echo "Start complete. You can visit harbor now."
down:
@echo "Please make sure to set -e NOTARYFLAG=true/CLAIRFLAG=true/CHARTFLAG=true if you are using Notary/CLAIR/Chartmuseum in Harbor, otherwise the Notary/CLAIR/Chartmuseum containers cannot be stop automaticlly."
@while [ -z "$$CONTINUE" ]; do \
read -r -p "Type anything but Y or y to exit. [Y/N]: " CONTINUE; \
done ; \
[ $$CONTINUE = "y" ] || [ $$CONTINUE = "Y" ] || (echo "Exiting."; exit 1;)
@echo "stoping harbor instance..."
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) down -v
@$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) down -v
@echo "Done."
swagger_client:


@@ -25,7 +25,7 @@ Please use [releases](https://github.com/vmware/harbor/releases) instead of the
<img alt="Harbor" src="docs/img/harbor_logo.png">
Harbor is an an open source trusted cloud native registry project that stores, signs, and scans content. Harbor extends the open source Docker Distribution by adding the functionalities usually required by users such as security, identity and management. Having a registry closer to the build and run environment can improve the image transfer efficiency. Harbor supports replication of images between registries, and also offers advanced security features such as user management, access control and activity auditing.
Harbor is an open source trusted cloud native registry project that stores, signs, and scans content. Harbor extends the open source Docker Distribution by adding the functionalities usually required by users such as security, identity and management. Having a registry closer to the build and run environment can improve the image transfer efficiency. Harbor supports replication of images between registries, and also offers advanced security features such as user management, access control and activity auditing.
Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF). If you are an organization that wants to help shape the evolution of cloud native technologies, consider joining the CNCF. For details about who's involved and how Harbor plays a role, read the CNCF
[announcement](https://www.cncf.io/blog/2018/07/31/cncf-to-host-harbor-in-the-sandbox/).
@@ -33,22 +33,23 @@ Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CN
## Features
* **Cloud native registry**: With support for both container images and [Helm](https://helm.sh) charts, Harbor serves as registry for cloud native environments like container runtimes and orchestration platforms.
* **Role based access control**: Users and repositories are organized via 'projects' and a user can have different permission for images under a project.
* **Policy based image replication**: Images can be replicated (synchronized) between multiple registry instances based on policies with multiple filters (repository, tag and label). Harbor will auto-retry to replicate if it encounters any errors. Great for load balancing, high availability, multi-datacenter, hybrid and multi-cloud scenarios.
* **Role based access control**: Users and repositories are organized via 'projects' and a user can have different permission for images or Helm charts under a project.
* **Policy based replication**: Images and charts can be replicated (synchronized) between multiple registry instances based on policies with multiple filters (repository, tag and label). Harbor automatically retries a replication if it encounters any errors. Great for load balancing, high availability, multi-datacenter, hybrid and multi-cloud scenarios.
* **Vulnerability Scanning**: Harbor scans images regularly and warns users of vulnerabilities.
* **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor and assigning proper project roles to them.
* **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal.
* **Image deletion & garbage collection**: Images can be deleted and their space can be recycled.
* **Notary**: Image authenticity can be ensured.
* **Graphical user portal**: User can easily browse, search repositories and manage projects.
* **Auditing**: All the operations to the repositories are tracked.
* **RESTful API**: RESTful APIs for most administrative operations, easy to integrate with external systems.
* **Easy deployment**: Provide both an online and offline installer.
* **RESTful API**: RESTful APIs for most administrative operations, easy to integrate with external systems. An embedded Swagger UI is available for exploring and testing the API.
* **Easy deployment**: Provide both an online and offline installer. In addition, a Helm Chart can be used to deploy Harbor on Kubernetes.
## Install & Run
**System requirements:**
**On a Linux host:** docker 17.03.0-ce+ and docker-compose 1.18.0+ .
**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.23.0+ .
Download binaries of **[Harbor release ](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](docs/installation_guide.md)** to install Harbor.
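For reference, a minimal sketch of the offline-installer flow (the release filename and version are placeholders, not from this commit):
```sh
$ tar xzvf harbor-offline-installer-<version>.tgz
$ cd harbor
# edit harbor.yml (at minimum the hostname), then:
$ sudo ./install.sh
```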


@@ -4,14 +4,13 @@ This guide provides instructions for developers to build and run Harbor from sou
## Step 1: Prepare for a build environment for Harbor
Harbor is deployed as several Docker containers and most of the code is written in Go language. The build environment requires Python, Docker, Docker Compose and golang development environment. Please install the below prerequisites:
Harbor is deployed as several Docker containers and most of the code is written in Go language. The build environment requires Docker, Docker Compose and golang development environment. Please install the below prerequisites:
Software | Required Version
----------------------|--------------------------
docker | 17.05 +
docker-compose | 1.11.0 +
python | 2.7 +
docker-compose | 1.23.0 +
git | 1.9.1 +
make | 3.81 +
golang* | 1.7.3 +
@@ -28,11 +27,11 @@ golang* | 1.7.3 +
### Configuration
Edit the file **make/harbor.cfg** and make necessary configuration changes such as hostname, admin password and mail server. Refer to **[Installation and Configuration Guide](installation_guide.md#configuring-harbor)** for more info.
Edit the file **make/harbor.yml** and make necessary configuration changes such as hostname, admin password and mail server. Refer to **[Installation and Configuration Guide](installation_guide.md#configuring-harbor)** for more info.
```sh
$ cd harbor
$ vi make/harbor.cfg
$ vi make/harbor.yml
```
### Compiling and Running
@@ -44,25 +43,25 @@ You can compile the code by one of the three approaches:
* Get official Golang image from docker hub:
```sh
$ docker pull golang:1.11.2
$ docker pull golang:1.12.5
```
* Build, install and bring up Harbor without Notary:
```sh
$ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage
$ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage
```
* Build, install and bring up Harbor with Notary:
```sh
$ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true
$ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true
```
* Build, install and bring up Harbor with Clair:
```sh
$ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage CLAIRFLAG=true
$ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage CLAIRFLAG=true
```
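These flags can be combined; a sketch that also enables the chart repository service, assuming the `CHARTFLAG` switch that the Makefile's `down` message mentions:
```sh
$ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true CLAIRFLAG=true CHARTFLAG=true
```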
#### II. Compile code with your own Golang environment, then build Harbor


@@ -113,17 +113,24 @@ Notice that you may need to trust the certificate at OS level. Please refer to t
**3) Configure Harbor**
Edit the file ```harbor.cfg```, update the hostname and the protocol, and update the attributes ```ssl_cert``` and ```ssl_cert_key```:
Edit the file `harbor.yml`, update the hostname and uncomment the https block, and update the attributes `certificate` and `private_key`:
```yaml
#set hostname
hostname: yourdomain.com
http:
port: 80
https:
# https port for harbor, default is 443
port: 443
# The path of cert and key files for nginx
certificate: /data/cert/yourdomain.com.crt
private_key: /data/cert/yourdomain.com.key
```
#set hostname
hostname = yourdomain.com:port
#set ui_url_protocol
ui_url_protocol = https
......
#The path of cert and key files for nginx, they are applied only the protocol is set to https
ssl_cert = /data/cert/yourdomain.com.crt
ssl_cert_key = /data/cert/yourdomain.com.key
```
Generate configuration files for Harbor:
@@ -148,7 +155,7 @@ After setting up HTTPS for Harbor, you can verify it by the following steps:
* Notice that some browsers may still show a warning about an unknown Certificate Authority (CA) for security reasons, even though we signed the certificates with a self-signed CA and deployed that CA to the location mentioned above. This is because a self-signed CA is essentially not a trusted third-party CA. You can import the CA into the browser yourself to remove the warning.
* On a machine with Docker daemon, make sure the option "-insecure-registry" for https://yourdomain.com does not present.
* On a machine with Docker daemon, make sure the option "-insecure-registry" for https://yourdomain.com is not present.
* If you mapped nginx port 443 to another port, then you should instead create the directory ```/etc/docker/certs.d/yourdomain.com:port``` (or your registry host IP:port). Then run any docker command to verify the setup, e.g.
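For example (a sketch; substitute your real domain and port):
```
docker login yourdomain.com:port
```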
@@ -163,7 +170,7 @@ If you've mapped nginx 443 port to another, you need to add the port to login, l
```
##Troubleshooting
## Troubleshooting
1. You may get an intermediate certificate from a certificate issuer. In this case, you should merge the intermediate certificate with your own certificate to create a certificate bundle. You can achieve this with the command below:
```

Binary file not shown.


@@ -30,8 +30,8 @@ Harbor is deployed as several Docker containers, and, therefore, can be deployed
|Software|Version|Description|
|---|---|---|
|Docker engine|version 17.03.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)|
|Docker Compose|version 1.18.0 or higher|For installation instructions, please refer to: [docker compose doc](https://docs.docker.com/compose/install/)|
|Docker engine|version 17.06.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)|
|Docker Compose|version 1.23.0 or higher|For installation instructions, please refer to: [docker compose doc](https://docs.docker.com/compose/install/)|
|Openssl|latest is preferred|Generate certificate and keys for Harbor|
### Network ports
@@ -80,7 +80,7 @@ The parameters are described below - note that at the very least, you will need
##### Required parameters
- **hostname**: The target host's hostname, which is used to access the Portal and the registry service. It should be the IP address or the fully qualified domain name (FQDN) of your target machine, e.g., `192.168.1.10` or `reg.yourdomain.com`. _Do NOT use `localhost` or `127.0.0.1` for the hostname - the registry service needs to be accessible by external clients!_
- **hostname**: The target host's hostname, which is used to access the Portal and the registry service. It should be the IP address or the fully qualified domain name (FQDN) of your target machine, e.g., `192.168.1.10` or `reg.yourdomain.com`. _Do NOT use `localhost` or `127.0.0.1` or `0.0.0.0` for the hostname - the registry service needs to be accessible by external clients!_
- **data_volume**: The location to store harbor's data.
@@ -97,7 +97,7 @@ The parameters are described below - note that at the very least, you will need
- **level**: log level, options are debug, info, warning, error, fatal
- **rotate_count**: Log files are rotated **rotate_count** times before being removed. If count is 0, old versions are removed rather than rotated.
- **rotate_size**: Log files are rotated only if they grow bigger than **rotate_size** bytes. If size is followed by k, the size is assumed to be in kilobytes. If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G are all valid.
- **location**: he directory to store store log
- **location**: the directory to store log
##### optional parameters


@@ -4,7 +4,7 @@ This guide provides instructions to manage roles by LDAP/AD group. You can impor
## Prerequisite
1. Harbor's auth_mode is ldap_auth and **[basic LDAP configure paremters](https://github.com/vmware/harbor/blob/master/docs/installation_guide.md#optional-parameters)** are configured.
1. Harbor's auth_mode is ldap_auth and **[basic LDAP configure parameters](https://github.com/vmware/harbor/blob/master/docs/installation_guide.md#optional-parameters)** are configured.
1. Memberof overlay
This feature requires that the LDAP/AD server has the **memberof overlay** feature enabled.
@@ -17,18 +17,23 @@ This guide provides instructions to manage roles by LDAP/AD group. You can impor
In addition to the **[basic LDAP configuration parameters](https://github.com/vmware/harbor/blob/master/docs/installation_guide.md#optional-parameters)**, the LDAP group related parameters should be configured; they can be set before or after installation.
1. Configure parameters in harbor.cfg before installation
1. Configure LDAP parameters via API, refer to **[Config Harbor user settings by command line](configure_user_settings.md)**
For example:
```
curl -X PUT -u "<username>:<password>" -H "Content-Type: application/json" -ki https://harbor.sample.domain/api/configurations -d'{"ldap_group_basedn":"ou=groups,dc=example,dc=com"}'
```
The following parameters are related to LDAP group configuration.
* ldap_group_basedn -- The base DN from which to lookup a group in LDAP/AD, for example: ou=groups,dc=example,dc=com
* ldap_group_filter -- The filter to search LDAP/AD group, for example: objectclass=groupOfNames
* ldap_group_gid -- The attribute used to name an LDAP/AD group, for example: cn
* ldap_group_scope -- The scope to search for LDAP/AD groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
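Combining these in a single call, following the example above (the values are illustrative):
```
curl -X PUT -u "<username>:<password>" -H "Content-Type: application/json" -ki https://harbor.sample.domain/api/configurations -d'{"ldap_group_basedn":"ou=groups,dc=example,dc=com","ldap_group_filter":"objectclass=groupOfNames","ldap_group_gid":"cn","ldap_group_scope":2}'
```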
2. Or Change configure parameter in web console after installation. Go to "Administration" -> "Configuration" -> "Authentication" and change following settings.
- LDAP Group Base DN -- ldap_group_basedn in harbor.cfg
- LDAP Group Filter -- ldap_group_filter in harbor.cfg
- LDAP Group GID -- ldap_group_gid in harbor.cfg
- LDAP Group Scope -- ldap_group_scope in harbor.cfg
2. Or change configure parameter in web console after installation. Go to "Administration" -> "Configuration" -> "Authentication" and change following settings.
- LDAP Group Base DN -- ldap_group_basedn in the Harbor user settings
- LDAP Group Filter -- ldap_group_filter in the Harbor user settings
- LDAP Group GID -- ldap_group_gid in the Harbor user settings
- LDAP Group Scope -- ldap_group_scope in the Harbor user settings
- LDAP Groups With Admin Privilege -- Specify an LDAP/AD group DN; all LDAP/AD users in this group have Harbor admin privileges.
![Screenshot of LDAP group config](img/group/ldap_group_config.png)
@@ -49,4 +54,4 @@ If a user is in the LDAP groups with admin privilege (ldap_group_admin_dn), the
## User privileges and group privileges
If a user has both user-level role and group-level role, only the user level role privileges will be considered.
If a user has both user-level role and group-level role, these privileges are merged together.


@@ -1,6 +1,6 @@
# Harbor upgrade and migration guide
This guide only covers upgrade and mgiration to version >= v1.8.0
This guide only covers upgrade and migration to version >= v1.8.0
When upgrading your existing Harbor instance to a newer version, you may need to migrate the data in your database and the settings in `harbor.cfg`.
Since the migration may alter the database schema and the settings of `harbor.cfg`, you should **always** back up your data before any migration.
@@ -34,7 +34,7 @@ you follow the steps below.
```
mv harbor /my_backup_dir/harbor
```
Back up database (by default in diretory `/data/database`)
Back up database (by default in directory `/data/database`)
```
cp -r /data/database /my_backup_dir/
```
@@ -52,7 +52,7 @@ you follow the steps below.
in that path will be updated with the values from ${harbor_cfg}
```
docker run -it --rm -v ${harbor_cfg}:/harbor-migration/harbor-cfg/harbor.cfg -v ${harbor_yml}:/harbor-migration/harbor-cfg-out/harbor.yml goharbor/harbor-migrator:[tag] --cfg up
docker run -it --rm -v ${harbor_cfg}:/harbor-migration/harbor-cfg/harbor.yml -v ${harbor_yml}:/harbor-migration/harbor-cfg-out/harbor.yml goharbor/harbor-migrator:[tag] --cfg up
```
**NOTE:** The schema upgrade and data migration of the database are performed by core when Harbor starts. If the migration fails,
please check the core log to debug.


@@ -2,7 +2,7 @@ swagger: '2.0'
info:
title: Harbor API
description: These APIs provide services for manipulating Harbor project.
version: 1.7.0
version: 1.9.0
host: localhost
schemes:
- http
@@ -122,8 +122,6 @@ paths:
responses:
'200':
description: Project name exists.
'401':
description: User need to log in first.
'404':
description: Project name does not exist.
'500':
@@ -311,6 +309,34 @@ paths:
description: User need to log in first.
'500':
description: Unexpected internal errors.
'/projects/{project_id}/summary':
get:
summary: Get summary of the project.
description: Get summary of the project.
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID
tags:
- Products
responses:
'200':
description: Get summary of the project successfully.
schema:
$ref: '#/definitions/ProjectSummary'
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User does not have permission to get summary of the project.
'404':
description: Project ID does not exist.
'500':
description: Unexpected internal errors.
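The new summary endpoint above can be exercised with curl; a sketch, assuming the `/api` base path that the docs elsewhere in this commit use and an illustrative project ID:
```
curl -u "<username>:<password>" -k https://harbor.sample.domain/api/projects/1/summary
```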
'/projects/{project_id}/metadatas':
get:
summary: Get project metadata.
@@ -516,7 +542,7 @@ paths:
'403':
description: User in session does not have permission to the project.
'409':
description: An LDAP user group with same DN already exist.
description: A user group with same group name already exist or an LDAP user group with same DN already exist.
'500':
description: Unexpected internal errors.
'/projects/{project_id}/members/{mid}':
@@ -1235,11 +1261,16 @@ paths:
type: string
required: true
description: Relevant repository name.
- name: label_ids
- name: label_id
in: query
type: string
required: false
description: A list of comma separated label IDs.
description: A label ID.
- name: detail
in: query
type: boolean
required: false
description: Bool value indicating whether to return detailed information of the tag, such as vulnerability scan info. If set to false, only the tag name is returned.
tags:
- Products
responses:
@@ -1277,6 +1308,8 @@ paths:
description: Invalid image values provided.
'401':
description: User has no permission to the source project or destination project.
'403':
description: Forbidden as quota exceeded.
'404':
description: Project or repository not found.
'409':
@@ -2352,10 +2385,10 @@ paths:
$ref: '#/definitions/Namespace'
'401':
description: User need to login first.
'404':
description: No registry found.
'403':
description: User has no privilege for the operation.
'404':
description: No registry found.
'500':
description: Unexpected internal errors.
/internal/syncregistry:
@@ -2376,6 +2409,20 @@ paths:
$ref: '#/responses/UnsupportedMediaType'
'500':
description: Unexpected internal errors.
/internal/syncquota:
post:
summary: Sync quota from registry/chart to DB.
description: |
This endpoint is for syncing quota usage of registry/chart with database.
tags:
- Products
responses:
'200':
description: Sync quota successfully.
'401':
description: User need to log in first.
'403':
description: User does not have permission of system admin role.
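A sketch of triggering the sync as a system admin (same `/api` base-path assumption as the other examples in this commit's docs):
```
curl -X POST -u "<admin>:<password>" -k https://harbor.sample.domain/api/internal/syncquota
```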
/systeminfo:
get:
summary: Get general system info
@@ -2575,7 +2622,7 @@ paths:
'403':
description: User in session does not have permission to the user group.
'409':
description: An LDAP user group with same DN already exist.
description: A user group with same group name already exist, or an LDAP user group with same DN already exist.
'500':
description: Unexpected internal errors.
'/usergroups/{group_id}':
@@ -3031,7 +3078,9 @@ paths:
description: The chart name
responses:
'200':
$ref: '#/definitions/ChartVersions'
description: Retrieved all versions of the specified chart
schema:
$ref: '#/definitions/ChartVersions'
'401':
$ref: '#/definitions/UnauthorizedChartAPIError'
'403':
@@ -3091,7 +3140,9 @@ paths:
description: The chart version
responses:
'200':
$ref: '#/definitions/ChartVersionDetails'
description: Successfully retrieved the chart version
schema:
$ref: '#/definitions/ChartVersionDetails'
'401':
$ref: '#/definitions/UnauthorizedChartAPIError'
'403':
@@ -3474,6 +3525,441 @@ paths:
description: The robot account is not found.
'500':
description: Unexpected internal errors.
'/system/oidc/ping':
post:
summary: Test the OIDC endpoint.
description: Test the OIDC endpoint, the setting of the endpoint is provided in the request. This API can only
be called by system admin.
tags:
- Products
- System
parameters:
- name: endpoint
in: body
description: Request body for OIDC endpoint to be tested.
required: true
schema:
type: object
properties:
url:
type: string
description: The URL of OIDC endpoint to be tested.
verify_cert:
type: boolean
description: Whether the certificate should be verified
responses:
'200':
description: Ping succeeded. The OIDC endpoint is valid.
'400':
description: The ping failed
'401':
description: User need to log in first.
'403':
description: User does not have permission to call this API
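A sketch of exercising the ping endpoint (the OIDC URL is an illustrative value, not from this spec):
```
curl -X POST -u "<admin>:<password>" -H "Content-Type: application/json" -k https://harbor.sample.domain/api/system/oidc/ping -d '{"url":"https://oidc.example.com","verify_cert":true}'
```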
'/system/CVEWhitelist':
get:
summary: Get the system level whitelist of CVE.
description: Get the system level whitelist of CVE. This API can be called by all authenticated users.
tags:
- Products
- System
responses:
'200':
description: Successfully retrieved the CVE whitelist.
schema:
$ref: "#/definitions/CVEWhitelist"
'401':
description: User is not authenticated.
'500':
description: Unexpected internal errors.
put:
summary: Update the system level whitelist of CVE.
description: This API overwrites the system level whitelist of CVE with the list in request body. Only system Admin
has permission to call this API.
tags:
- Products
- System
parameters:
- in: body
name: whitelist
description: The whitelist with new content
schema:
$ref: "#/definitions/CVEWhitelist"
responses:
'200':
description: Successfully updated the CVE whitelist.
'401':
description: User is not authenticated.
'403':
description: User does not have permission to call this API.
'500':
description: Unexpected internal errors.
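A sketch of overwriting the system whitelist; the CVE ID mirrors the example in the `CVEWhitelistItem` definition further down, and `expires_at` is an illustrative epoch value:
```
curl -X PUT -u "<admin>:<password>" -H "Content-Type: application/json" -k https://harbor.sample.domain/api/system/CVEWhitelist -d '{"items":[{"cve_id":"CVE-2019-10164"}],"expires_at":1577808000}'
```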
'/quotas':
get:
summary: List quotas
description: List quotas
tags:
- quota
parameters:
- name: reference
in: query
description: The reference type of quota.
required: false
type: string
- name: sort
in: query
type: string
required: false
description: |
Sort method, valid values include:
'hard.resource_name', '-hard.resource_name', 'used.resource_name', '-used.resource_name'.
Here '-' stands for descending order, resource_name should be the real resource name of the quota.
- name: page
in: query
type: integer
format: int32
required: false
description: 'The page number, default is 1.'
- name: page_size
in: query
type: integer
format: int32
required: false
description: 'The size of per page, default is 10, maximum is 100.'
responses:
'200':
description: Successfully retrieved the quotas.
schema:
type: array
items:
$ref: '#/definitions/Quota'
headers:
X-Total-Count:
description: The total count of quotas
type: integer
Link:
description: Link refers to the previous page and next page
type: string
'401':
description: User is not authenticated.
'403':
description: User does not have permission to call this API.
'500':
description: Unexpected internal errors.
'/quotas/{id}':
get:
summary: Get the specified quota
description: Get the specified quota
tags:
- quota
parameters:
- name: id
in: path
type: integer
required: true
description: Quota ID
responses:
'200':
description: Successfully retrieved the quota.
schema:
$ref: '#/definitions/Quota'
'401':
description: User need to log in first.
'403':
description: User does not have permission to call this API
'404':
description: Quota does not exist.
'500':
description: Unexpected internal errors.
put:
summary: Update the specified quota
description: Update hard limits of the specified quota
tags:
- quota
parameters:
- name: id
in: path
type: integer
required: true
description: Quota ID
- name: hard
in: body
required: true
description: The new hard limits for the quota
schema:
$ref: '#/definitions/QuotaUpdateReq'
responses:
'200':
description: Updated quota hard limits successfully.
'400':
description: Illegal format of quota update request.
'401':
description: User need to log in first.
'403':
description: User does not have permission to the quota.
'404':
description: Quota ID does not exist.
'500':
description: Unexpected internal errors.
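Sketches of both quota calls; the `count`/`storage` resource names follow the keys seeded by this commit's SQL migration, and the quota ID is illustrative:
```
# list project quotas, first page
curl -u "<admin>:<password>" -k "https://harbor.sample.domain/api/quotas?reference=project&page=1&page_size=10"
# raise the hard limits of quota 1
curl -X PUT -u "<admin>:<password>" -H "Content-Type: application/json" -k https://harbor.sample.domain/api/quotas/1 -d '{"hard":{"count":100,"storage":10737418240}}'
```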
'/projects/{project_id}/webhook/policies':
get:
summary: List project webhook policies.
description: |
This endpoint returns webhook policies of a project.
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID.
tags:
- Products
responses:
'200':
description: List project webhook policies successfully.
schema:
type: array
items:
$ref: '#/definitions/WebhookPolicy'
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to list webhook policies of the project.
'500':
description: Unexpected internal errors.
post:
summary: Create project webhook policy.
description: |
This endpoint creates a webhook policy if the project does not have one.
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID
- name: policy
in: body
description: Properties "targets" and "event_types" needed.
required: true
schema:
$ref: '#/definitions/WebhookPolicy'
tags:
- Products
responses:
'201':
description: Project webhook policy created successfully.
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to create webhook policy of the project.
'500':
description: Unexpected internal errors.
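A sketch of creating a policy; the `http` target type and `pushImage` event type are illustrative values, not taken from this spec:
```
curl -X POST -u "<admin>:<password>" -H "Content-Type: application/json" -k https://harbor.sample.domain/api/projects/1/webhook/policies -d '{"name":"demo","targets":[{"type":"http","address":"https://hooks.example.com/harbor","skip_cert_verify":true}],"event_types":["pushImage"]}'
```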
'/projects/{project_id}/webhook/policies/{policy_id}':
get:
summary: Get project webhook policy
description: |
This endpoint returns specified webhook policy of a project.
parameters:
- name: project_id
in: path
description: Relevant project ID.
required: true
type: integer
format: int64
- name: policy_id
in: path
description: The id of webhook policy.
required: true
type: integer
format: int64
tags:
- Products
responses:
'200':
description: Get webhook policy successfully.
schema:
$ref: '#/definitions/WebhookPolicy'
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to get webhook policy of the project.
'404':
description: Webhook policy ID does not exist.
'500':
description: Internal server errors.
put:
summary: Update webhook policy of a project.
description: |
This endpoint is aimed to update the webhook policy of a project.
parameters:
- name: project_id
in: path
description: Relevant project ID.
required: true
type: integer
format: int64
- name: policy_id
in: path
description: The id of webhook policy.
required: true
type: integer
format: int64
- name: policy
in: body
description: All properties needed except "id", "project_id", "creation_time", "update_time".
required: true
schema:
$ref: '#/definitions/WebhookPolicy'
tags:
- Products
responses:
'200':
description: Update webhook policy successfully.
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to update webhook policy of the project.
'404':
description: Webhook policy ID does not exist.
'500':
description: Internal server errors.
delete:
summary: Delete webhook policy of a project
description: |
This endpoint is aimed to delete the webhook policy of a project.
parameters:
- name: project_id
in: path
description: Relevant project ID.
required: true
type: integer
format: int64
- name: policy_id
in: path
description: The id of webhook policy.
required: true
type: integer
format: int64
tags:
- Products
responses:
'200':
description: Delete webhook policy successfully.
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to delete webhook policy of the project.
'404':
description: Webhook policy ID does not exist.
'500':
description: Internal server errors.
'/projects/{project_id}/webhook/policies/test':
post:
summary: Test project webhook connection
description: |
This endpoint tests webhook connection of a project.
parameters:
- name: project_id
in: path
description: Relevant project ID.
required: true
type: integer
format: int64
- name: policy
in: body
description: Only property "targets" needed.
required: true
schema:
$ref: '#/definitions/WebhookPolicy'
tags:
- Products
responses:
'200':
description: Test webhook connection successfully.
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to get webhook policy of the project.
'500':
description: Internal server errors.
'/projects/{project_id}/webhook/lasttrigger':
get:
summary: Get project webhook policy last trigger info
description: |
This endpoint returns last trigger information of project webhook policy.
parameters:
- name: project_id
in: path
description: Relevant project ID.
required: true
type: integer
format: int64
tags:
- Products
responses:
'200':
description: Get project webhook policy last trigger info successfully.
schema:
type: array
items:
$ref: '#/definitions/WebhookLastTrigger'
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to get webhook policy of the project.
'500':
description: Internal server errors.
'/projects/{project_id}/webhook/jobs':
get:
summary: List project webhook jobs
description: |
This endpoint returns webhook jobs of a project.
parameters:
- name: project_id
in: path
type: integer
format: int64
required: true
description: Relevant project ID.
- name: policy_id
in: query
type: integer
format: int64
required: true
description: The policy ID.
tags:
- Products
responses:
'200':
description: List project webhook jobs successfully.
schema:
type: array
items:
$ref: '#/definitions/WebhookJob'
'400':
description: Illegal format of provided ID value.
'401':
description: User need to log in first.
'403':
description: User have no permission to list webhook jobs of the project.
'500':
description: Unexpected internal errors.
responses:
OK:
description: 'Success'
@@ -3556,6 +4042,17 @@ definitions:
metadata:
description: The metadata of the project.
$ref: '#/definitions/ProjectMetadata'
cve_whitelist:
description: The CVE whitelist of the project.
$ref: '#/definitions/CVEWhitelist'
count_limit:
type: integer
format: int64
description: The count quota of the project.
storage_limit:
type: integer
format: int64
description: The storage quota of the project.
Project:
type: object
properties:
@@ -3597,6 +4094,9 @@ definitions:
metadata:
description: The metadata of the project.
$ref: '#/definitions/ProjectMetadata'
cve_whitelist:
description: The CVE whitelist of this project.
$ref: '#/definitions/CVEWhitelist'
ProjectMetadata:
type: object
properties:
@@ -3605,16 +4105,50 @@ definitions:
description: 'The public status of the project. The valid values are "true", "false".'
enable_content_trust:
type: string
description: 'Whether content trust is enabled or not. If it is enabled, user cann''t pull unsigned images from this project. The valid values are "true", "false".'
description: 'Whether content trust is enabled or not. If it is enabled, user can''t pull unsigned images from this project. The valid values are "true", "false".'
prevent_vul:
type: string
description: 'Whether prevent the vulnerable images from running. The valid values are "true", "false".'
severity:
type: string
description: 'If the vulnerability is high than severity defined here, the images cann''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
description: 'If the vulnerability is higher than the severity defined here, the images can''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
auto_scan:
type: string
description: 'Whether scan images automatically when pushing. The valid values are "true", "false".'
reuse_sys_cve_whitelist:
type: string
description: 'Whether this project reuses the system level CVE whitelist as its own whitelist. The valid values are "true", "false".
If it is set to "true" the actual whitelist associated with this project, if any, will be ignored.'
ProjectSummary:
type: object
properties:
repo_count:
type: integer
description: The number of the repositories under this project.
chart_count:
type: integer
description: The total number of charts under this project.
project_admin_count:
type: integer
description: The total number of project admin members.
master_count:
type: integer
description: The total number of master members.
developer_count:
type: integer
description: The total number of developer members.
guest_count:
type: integer
description: The total number of guest members.
quota:
type: object
properties:
hard:
$ref: "#/definitions/ResourceList"
description: The hard limits of the quota
used:
$ref: "#/definitions/ResourceList"
description: The used status of the quota
Manifest:
type: object
properties:
@@ -4270,6 +4804,9 @@ definitions:
auth_mode:
type: string
description: 'The auth mode of current system, such as "db_auth", "ldap_auth"'
count_per_project:
type: string
description: The default count quota for the new created projects.
email_from:
type: string
description: The sender name for Email notification.
@@ -4330,12 +4867,18 @@ definitions:
project_creation_restriction:
type: string
description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
quota_per_project_enable:
type: boolean
description: This attribute indicates whether quota per project is enabled in Harbor
read_only:
type: boolean
description: '''docker push'' is prohibited by Harbor if you set it to true. '
self_registration:
type: boolean
description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.'
storage_per_project:
type: string
description: The default storage quota for the new created projects.
token_expiration:
type: integer
description: 'The expiration time of the token for internal Registry, in minutes.'
@@ -4361,6 +4904,9 @@ definitions:
auth_mode:
$ref: '#/definitions/StringConfigItem'
description: 'The auth mode of current system, such as "db_auth", "ldap_auth"'
count_per_project:
$ref: '#/definitions/IntegerConfigItem'
description: The default count quota for the new created projects.
email_from:
$ref: '#/definitions/StringConfigItem'
description: The sender name for Email notification.
@@ -4421,12 +4967,18 @@ definitions:
project_creation_restriction:
$ref: '#/definitions/StringConfigItem'
description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
quota_per_project_enable:
$ref: '#/definitions/BoolConfigItem'
description: This attribute indicates whether quota per project is enabled in Harbor
read_only:
$ref: '#/definitions/BoolConfigItem'
description: '''docker push'' is prohibited by Harbor if you set it to true. '
self_registration:
$ref: '#/definitions/BoolConfigItem'
description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.'
storage_per_project:
$ref: '#/definitions/IntegerConfigItem'
description: The default storage quota for the new created projects.
token_expiration:
$ref: '#/definitions/IntegerConfigItem'
description: 'The expiration time of the token for internal Registry, in minutes.'
@@ -4502,7 +5054,7 @@ definitions:
description: the role id
entity_id:
type: integer
description: 'the id of entity, if the member is an user, it is user_id in user table. if the member is an user group, it is the user group''s ID in user_group table.'
description: 'the id of entity, if the member is a user, it is user_id in user table. if the member is a user group, it is the user group''s ID in user_group table.'
entity_type:
type: string
description: 'the entity''s type, u for user entity, g for group entity.'
@@ -4542,7 +5094,7 @@ definitions:
description: The name of the user group
group_type:
type: integer
description: 'The group type, 1 for LDAP group.'
description: 'The group type, 1 for LDAP group, 2 for HTTP group.'
ldap_group_dn:
type: string
description: The DN of the LDAP group if group type is 1 (LDAP group).
@@ -4596,7 +5148,7 @@ definitions:
allOf:
- $ref: '#/definitions/ChartAPIError'
ForbiddenChartAPIError:
description: Operation is forbidden
description: Operation is forbidden or quota exceeded
type: object
allOf:
- $ref: '#/definitions/ChartAPIError'
@@ -4829,7 +5381,9 @@ definitions:
properties:
type:
type: string
description: The schedule type. The valid values are hourly, daily weekly, custom and None. 'None' means to cancel the schedule.
description: |
The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manually' and 'None'.
'Manually' means to trigger it right away and 'None' means to cancel the schedule.
cron:
type: string
description: A cron expression, a time-based job scheduler.
@@ -5066,3 +5620,161 @@ definitions:
metadata:
type: object
description: The metadata of namespace
CVEWhitelist:
type: object
description: The CVE Whitelist for system or project
properties:
id:
type: integer
description: ID of the whitelist
project_id:
type: integer
description: ID of the project which the whitelist belongs to. For system level whitelist this attribute is zero.
expires_at:
type: integer
description: the time for expiration of the whitelist, in the form of seconds since epoch. This is an optional attribute, if it's not set the CVE whitelist does not expire.
items:
type: array
items:
$ref: "#/definitions/CVEWhitelistItem"
CVEWhitelistItem:
type: object
description: The item in CVE whitelist
properties:
cve_id:
type: string
description: The ID of the CVE, such as "CVE-2019-10164"
ResourceList:
type: object
additionalProperties:
type: integer
QuotaUpdateReq:
type: object
properties:
hard:
$ref: "#/definitions/ResourceList"
description: The new hard limits for the quota
QuotaRefObject:
type: object
additionalProperties: {}
Quota:
type: object
description: The quota object
properties:
id:
type: integer
description: ID of the quota
ref:
$ref: "#/definitions/QuotaRefObject"
description: The reference object of the quota
hard:
$ref: "#/definitions/ResourceList"
description: The hard limits of the quota
used:
$ref: "#/definitions/ResourceList"
description: The used status of the quota
creation_time:
type: string
description: the creation time of the quota
update_time:
type: string
description: the update time of the quota
WebhookTargetObject:
type: object
description: The webhook policy target object.
properties:
type:
type: string
description: The webhook target notify type.
address:
type: string
description: The webhook target address.
auth_header:
type: string
description: The webhook auth header.
skip_cert_verify:
type: boolean
description: Whether or not to skip cert verify.
WebhookPolicy:
type: object
description: The webhook policy object
properties:
id:
type: integer
format: int64
description: The webhook policy ID.
name:
type: string
description: The name of webhook policy.
description:
type: string
description: The description of webhook policy.
project_id:
type: integer
description: The project ID of webhook policy.
targets:
type: array
items:
$ref: '#/definitions/WebhookTargetObject'
event_types:
type: array
items:
type: string
creator:
type: string
description: The creator of the webhook policy.
creation_time:
type: string
description: The create time of the webhook policy.
update_time:
type: string
description: The update time of the webhook policy.
enabled:
type: boolean
description: Whether the webhook policy is enabled or not.
WebhookLastTrigger:
type: object
description: The webhook policy and last trigger time group by event type.
properties:
event_type:
type: string
description: The webhook event type.
enabled:
type: boolean
description: Whether or not the webhook policy enabled.
creation_time:
type: string
description: The creation time of webhook policy.
last_trigger_time:
type: string
description: The last trigger time of webhook policy.
WebhookJob:
type: object
description: The webhook job.
properties:
id:
type: integer
format: int64
description: The webhook job ID.
policy_id:
type: integer
format: int64
description: The webhook policy ID.
event_type:
type: string
description: The webhook job event type.
notify_type:
type: string
description: The webhook job notify type.
status:
type: string
description: The webhook job status.
job_detail:
type: string
description: The webhook job notify detailed data.
creation_time:
type: string
description: The webhook job creation time.
update_time:
type: string
description: The webhook job update time.


@@ -36,10 +36,10 @@ version | set harbor version
#### EXAMPLE:
#### Build and run harbor from source code.
make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true
make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true
### Package offline installer
make package_offline GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true
make package_offline GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true
### Start harbor with notary
make -e NOTARYFLAG=true start


@@ -573,7 +573,7 @@ Before working, Harbor should be added into the repository list with `helm repo 
With this mode Helm can be made aware of all the charts located in different projects and which are accessible by the currently authenticated user.
```
helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo
helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo
```
**NOTES:** Providing both ca file and cert files is caused by an issue from helm.
@@ -581,7 +581,7 @@ helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --us
With this mode, helm can only pull charts in the specified project.
```
helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject
helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject
```
#### Push charts to the repository server by CLI
@@ -591,7 +591,7 @@ helm plugin install https://github.com/chartmuseum/helm-push
```
After a successful installation, run `push` command to upload your charts:
```
helm push --ca-file=ca.crt --key-file=server.key --cert-file=server.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo
helm push --ca-file=ca.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo
```
**NOTES:** `push` command does not support pushing a prov file of a signed chart yet.
@@ -609,7 +609,7 @@ helm search hello
```
Everything is ready, install the chart to your kubernetes:
```
helm install --ca-file=ca.crt --key-file=server.key --cert-file=server.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm
helm install --ca-file=ca.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm
```
For other more helm commands like how to sign a chart, please refer to the [helm doc](https://docs.helm.sh/helm/#helm).


@@ -30,6 +30,11 @@ harbor_admin_password: Harbor12345
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 50
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 100 for postgres.
max_open_conns: 100
# The default data volume
data_volume: /data
@@ -54,16 +59,14 @@ clair:
# The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
updaters_interval: 12
# Config http proxy for Clair, e.g. http://my.proxy.com:3128
# Clair doesn't need to connect to harbor internal components via http proxy.
http_proxy:
https_proxy:
no_proxy: 127.0.0.1,localhost,core,registry
jobservice:
# Maximum number of job workers in job service
max_job_workers: 10
notification:
# Maximum retry count for webhook job
webhook_job_max_retry: 10
chart:
# Change the value of absolute_url to enabled can enable absolute url in chart
absolute_url: disabled
@@ -72,17 +75,28 @@ chart:
log:
# options are debug, info, warning, error, fatal
level: info
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
location: /var/log/harbor
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that stores the logs
location: /var/log/harbor
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit log to external endpoint, options is tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 1.8.0
_version: 1.9.0
# Uncomment external_database if using external database.
# external_database:
@@ -93,6 +107,8 @@ _version: 1.8.0
# username: harbor_db_username
# password: harbor_db_password
# ssl_mode: disable
# max_idle_conns: 2
# max_open_conns: 0
# clair:
# host: clair_db_host
# port: clair_db_port
@@ -128,3 +144,20 @@ _version: 1.8.0
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via http proxy.
# Remove a component from the `components` array if you want to disable the
# proxy for it. If you want to use the proxy for replication, you MUST enable
# the proxy for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for some special registry.
proxy:
http_proxy:
https_proxy:
no_proxy: 127.0.0.1,localhost,.local,.internal,log,db,redis,nginx,core,portal,postgresql,jobservice,registry,registryctl,clair
components:
- core
- jobservice
- clair


@@ -117,7 +117,7 @@ function check_docker {
function check_dockercompose {
if ! docker-compose --version &> /dev/null
then
error "Need to install docker-compose(1.18.0+) by yourself first and run this script again."
error "Need to install docker-compose(1.23.0+) by yourself first and run this script again."
exit 1
fi
@@ -129,9 +129,9 @@ function check_dockercompose {
docker_compose_version_part2=${BASH_REMATCH[3]}
# the version of docker-compose does not meet the requirement
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 18 ])
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 23 ])
then
error "Need to upgrade docker-compose package to 1.18.0+."
error "Need to upgrade docker-compose package to 1.23.0+."
exit 1
else
note "docker-compose version: $docker_compose_version"


@@ -56,9 +56,9 @@ $$;
CREATE TRIGGER harbor_user_update_time_at_modtime BEFORE UPDATE ON harbor_user FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
insert into harbor_user (username, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
('anonymous', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
create table project (
project_id SERIAL PRIMARY KEY NOT NULL,


@@ -0,0 +1,30 @@
/*
Rename the duplicate names before adding "UNIQUE" constraint
*/
DO $$
BEGIN
WHILE EXISTS (SELECT count(*) FROM user_group GROUP BY group_name HAVING count(*) > 1) LOOP
UPDATE user_group AS r
SET group_name = (
/*
truncate the name if it is too long after appending the sequence number
*/
CASE WHEN (length(group_name)+length(v.seq::text)+1) > 256
THEN
substring(group_name from 1 for (255-length(v.seq::text))) || '_' || v.seq
ELSE
group_name || '_' || v.seq
END
)
FROM (SELECT id, row_number() OVER (PARTITION BY group_name ORDER BY id) AS seq FROM user_group) AS v
WHERE r.id = v.id AND v.seq > 1;
END LOOP;
END $$;
ALTER TABLE user_group ADD CONSTRAINT unique_group_name UNIQUE (group_name);
/*
Fix issue https://github.com/goharbor/harbor/issues/8526, delete the none scan_all schedule.
*/
UPDATE admin_job SET deleted='true' WHERE cron_str='{"type":"none"}';
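The CASE expression in the rename block above keeps every deduplicated name within the 256-character column: the original name is cut to 255 minus the width of the sequence number before '_<seq>' is appended. The same rule in Python, as a hedged illustration only:

def dedup_group_name(name, seq, limit=256):
    # Mirror of the CASE expression: append '_<seq>', truncating the
    # original name so the result never exceeds the column limit.
    seq_text = str(seq)
    if len(name) + len(seq_text) + 1 > limit:
        name = name[:(limit - 1) - len(seq_text)]
    return name + '_' + seq_text

print(dedup_group_name('developers', 2))     # developers_2
print(len(dedup_group_name('x' * 300, 12)))  # 256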

View File

@ -0,0 +1,188 @@
/* add table for CVE whitelist */
CREATE TABLE cve_whitelist
(
id SERIAL PRIMARY KEY NOT NULL,
project_id int,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
expires_at bigint,
items text NOT NULL,
UNIQUE (project_id)
);
CREATE TABLE blob
(
id SERIAL PRIMARY KEY NOT NULL,
/*
digest of config, layer, manifest
*/
digest varchar(255) NOT NULL,
content_type varchar(1024) NOT NULL,
size bigint NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
UNIQUE (digest)
);
/* add the table for project and blob */
CREATE TABLE project_blob (
id SERIAL PRIMARY KEY NOT NULL,
project_id int NOT NULL,
blob_id int NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
CONSTRAINT unique_project_blob UNIQUE (project_id, blob_id)
);
CREATE TABLE artifact
(
id SERIAL PRIMARY KEY NOT NULL,
project_id int NOT NULL,
repo varchar(255) NOT NULL,
tag varchar(255) NOT NULL,
/*
digest of manifest
*/
digest varchar(255) NOT NULL,
/*
kind of artifact: image, chart, etc.
*/
kind varchar(255) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
pull_time timestamp,
push_time timestamp,
CONSTRAINT unique_artifact UNIQUE (project_id, repo, tag)
);
/* add the table for relation of artifact and blob */
CREATE TABLE artifact_blob
(
id SERIAL PRIMARY KEY NOT NULL,
digest_af varchar(255) NOT NULL,
digest_blob varchar(255) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
CONSTRAINT unique_artifact_blob UNIQUE (digest_af, digest_blob)
);
/* add quota table */
CREATE TABLE quota
(
id SERIAL PRIMARY KEY NOT NULL,
reference VARCHAR(255) NOT NULL,
reference_id VARCHAR(255) NOT NULL,
hard JSONB NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE (reference, reference_id)
);
/* add quota usage table */
CREATE TABLE quota_usage
(
id SERIAL PRIMARY KEY NOT NULL,
reference VARCHAR(255) NOT NULL,
reference_id VARCHAR(255) NOT NULL,
used JSONB NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE (reference, reference_id)
);
/* Only set the quota and usage for 'library'; let the quota sync job handle the other projects. */
INSERT INTO quota (reference, reference_id, hard, creation_time, update_time)
SELECT 'project',
CAST(project_id AS VARCHAR),
'{"count": -1, "storage": -1}',
NOW(),
NOW()
FROM project
WHERE name = 'library' and deleted = 'f';
INSERT INTO quota_usage (id, reference, reference_id, used, creation_time, update_time)
SELECT id,
reference,
reference_id,
'{"count": 0, "storage": 0}',
creation_time,
update_time
FROM quota;
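In the JSONB documents seeded above, -1 in `hard` means unlimited, and `used` starts at zero. A hedged sketch of a check under that convention (within_quota is a hypothetical helper, not Harbor code):

import json

hard = json.loads('{"count": -1, "storage": -1}')  # -1 == unlimited
used = json.loads('{"count": 0, "storage": 0}')

def within_quota(resource, requested):
    # Hypothetical helper illustrating the convention only.
    limit = hard[resource]
    return limit == -1 or used[resource] + requested <= limit

print(within_quota('storage', 10 * 1024 * 1024))  # True: -1 is unlimited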
create table retention_policy
(
id serial PRIMARY KEY NOT NULL,
scope_level varchar(20),
scope_reference integer,
trigger_kind varchar(20),
data text,
create_time time,
update_time time
);
create table retention_execution
(
id serial PRIMARY KEY NOT NULL,
policy_id integer,
dry_run boolean,
trigger varchar(20),
start_time timestamp
);
create table retention_task
(
id SERIAL NOT NULL,
execution_id integer,
repository varchar(255),
job_id varchar(64),
status varchar(32),
status_code integer,
status_revision integer,
start_time timestamp default CURRENT_TIMESTAMP,
end_time timestamp default CURRENT_TIMESTAMP,
total integer,
retained integer,
PRIMARY KEY (id)
);
create table schedule
(
id SERIAL NOT NULL,
job_id varchar(64),
status varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
/*add notification policy table*/
create table notification_policy (
id SERIAL NOT NULL,
name varchar(256),
project_id int NOT NULL,
enabled boolean NOT NULL DEFAULT true,
description text,
targets text,
event_types text,
creator varchar(256),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id),
CONSTRAINT unique_project_id UNIQUE (project_id)
);
/*add notification job table*/
CREATE TABLE notification_job (
id SERIAL NOT NULL,
policy_id int NOT NULL,
status varchar(32),
/* event_type is the type of the trigger event, e.g. pushImage, pullImage, uploadChart... */
event_type varchar(256),
/* notify_type is the channel used to notify the user of the event, e.g. HTTP, Email... */
notify_type varchar(256),
job_detail text,
job_uuid varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
ALTER TABLE replication_task ADD COLUMN status_revision int DEFAULT 0;
DELETE FROM project_metadata WHERE deleted = TRUE;
ALTER TABLE project_metadata DROP COLUMN deleted;

View File

@ -4,7 +4,7 @@ set +e
usage(){
echo "Usage: builder <golang image:version> <code path> <code release tag> <main.go path> <binary name>"
echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.8.1 cmd/chartmuseum chartm"
echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.9.0 cmd/chartmuseum chartm"
exit 1
}
@ -13,7 +13,7 @@ if [ $# != 5 ]; then
fi
GOLANG_IMAGE="$1"
CODE_PATH="$2"
GIT_PATH="$2"
CODE_VERSION="$3"
MAIN_GO_PATH="$4"
BIN_NAME="$5"
@ -27,7 +27,7 @@ mkdir -p binary
rm -rf binary/$BIN_NAME || true
cp compile.sh binary/
docker run -it -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $CODE_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME
docker run -it --rm -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $GIT_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME
#Clear
docker rm -f golang_code_builder

View File

@ -11,24 +11,21 @@ if [ $# != 4 ]; then
usage
fi
CODE_PATH="$1"
GIT_PATH="$1"
VERSION="$2"
MAIN_GO_PATH="$3"
BIN_NAME="$4"
#Get the source code of chartmusem
go get $CODE_PATH
#Get the source code
git clone $GIT_PATH src_code
ls
SRC_PATH=$(pwd)/src_code
set -e
#Checkout the released tag branch
cd /go/src/$CODE_PATH
cd $SRC_PATH
git checkout tags/$VERSION -b $VERSION
#Install the go dep tool to restore the package dependencies
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure
#Compile
cd /go/src/$CODE_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME
cd $SRC_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME
mv $BIN_NAME /go/bin/

View File

@ -1,16 +1,16 @@
FROM photon:2.0
RUN tdnf install sudo -y >> /dev/null\
RUN tdnf install sudo tzdata -y >> /dev/null \
&& tdnf clean all \
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor \
&& mkdir /harbor/
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/ping || exit 1
COPY ./make/photon/core/harbor_core ./make/photon/core/start.sh ./UIVERSION /harbor/
COPY ./make/photon/core/harbor_core ./UIVERSION /harbor/
COPY ./src/core/views /harbor/views
COPY ./make/migrations /harbor/migrations
RUN chmod u+x /harbor/start.sh /harbor/harbor_core
RUN chmod u+x /harbor/harbor_core
WORKDIR /harbor/
ENTRYPOINT ["/harbor/start.sh"]
USER harbor
ENTRYPOINT ["/harbor/harbor_core"]

View File

@ -1,3 +0,0 @@
#!/bin/sh
sudo -E -u \#10000 "/harbor/harbor_core"

View File

@ -18,15 +18,16 @@ RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools
VOLUME /var/lib/postgresql/data
ADD ./make/photon/db/docker-entrypoint.sh /entrypoint.sh
ADD ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
RUN chmod u+x /entrypoint.sh /docker-healthcheck.sh
ENTRYPOINT ["/entrypoint.sh"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"]
COPY ./make/photon/db/docker-entrypoint.sh /docker-entrypoint.sh
COPY ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
COPY ./make/photon/db/initial-notaryserver.sql /docker-entrypoint-initdb.d/
COPY ./make/photon/db/initial-notarysigner.sql /docker-entrypoint-initdb.d/
COPY ./make/photon/db/initial-registry.sql /docker-entrypoint-initdb.d/
RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \
&& chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"]
EXPOSE 5432
CMD ["postgres"]
USER postgres

View File

@ -23,95 +23,88 @@ file_env() {
unset "$fileVar"
}
if [ "${1:0:1}" = '-' ]; then
set -- postgres "$@"
fi
if [ "$1" = 'postgres' ]; then
chown -R postgres:postgres $PGDATA
# look specifically for PG_VERSION, as it is expected in the DB dir
if [ ! -s "$PGDATA/PG_VERSION" ]; then
file_env 'POSTGRES_INITDB_ARGS'
if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
fi
su - $1 -c "initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS"
# check password first so we can output the warning before postgres
# messes it up
file_env 'POSTGRES_PASSWORD'
if [ "$POSTGRES_PASSWORD" ]; then
pass="PASSWORD '$POSTGRES_PASSWORD'"
authMethod=md5
else
# The - option suppresses leading tabs but *not* spaces. :)
cat >&2 <<-EOF
****************************************************
WARNING: No password has been set for the database.
This will allow anyone with access to the
Postgres port to access your database. In
Docker's default configuration, this is
effectively any other container on the same
system.
Use "-e POSTGRES_PASSWORD=password" to set
it in "docker run".
****************************************************
# look specifically for PG_VERSION, as it is expected in the DB dir
if [ ! -s "$PGDATA/PG_VERSION" ]; then
file_env 'POSTGRES_INITDB_ARGS'
if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
fi
initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS
# check password first so we can output the warning before postgres
# messes it up
file_env 'POSTGRES_PASSWORD'
if [ "$POSTGRES_PASSWORD" ]; then
pass="PASSWORD '$POSTGRES_PASSWORD'"
authMethod=md5
else
# The - option suppresses leading tabs but *not* spaces. :)
cat >&2 <<-EOF
****************************************************
WARNING: No password has been set for the database.
This will allow anyone with access to the
Postgres port to access your database. In
Docker's default configuration, this is
effectively any other container on the same
system.
Use "-e POSTGRES_PASSWORD=password" to set
it in "docker run".
****************************************************
EOF
pass=
authMethod=trust
fi
pass=
authMethod=trust
fi
{
echo
echo "host all all all $authMethod"
} >> "$PGDATA/pg_hba.conf"
su postgres
echo `whoami`
# internal start of server in order to allow set-up using psql-client
# does not listen on external TCP/IP and waits until start finishes
su - $1 -c "pg_ctl -D \"$PGDATA\" -o \"-c listen_addresses='localhost'\" -w start"
{
echo
echo "host all all all $authMethod"
} >> "$PGDATA/pg_hba.conf"
echo `whoami`
# internal start of server in order to allow set-up using psql-client
# does not listen on external TCP/IP and waits until start finishes
pg_ctl -D "$PGDATA" -o "-c listen_addresses=''" -w start
file_env 'POSTGRES_USER' 'postgres'
file_env 'POSTGRES_DB' "$POSTGRES_USER"
file_env 'POSTGRES_USER' 'postgres'
file_env 'POSTGRES_DB' "$POSTGRES_USER"
psql=( psql -v ON_ERROR_STOP=1 )
psql=( psql -v ON_ERROR_STOP=1 )
if [ "$POSTGRES_DB" != 'postgres' ]; then
"${psql[@]}" --username postgres <<-EOSQL
CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
echo
fi
if [ "$POSTGRES_USER" = 'postgres' ]; then
op='ALTER'
else
op='CREATE'
fi
if [ "$POSTGRES_DB" != 'postgres' ]; then
"${psql[@]}" --username postgres <<-EOSQL
$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
echo
psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
PGUSER="${PGUSER:-postgres}" \
su - $1 -c "pg_ctl -D \"$PGDATA\" -m fast -w stop"
echo
echo 'PostgreSQL init process complete; ready for start up.'
echo
fi
if [ "$POSTGRES_USER" = 'postgres' ]; then
op='ALTER'
else
op='CREATE'
fi
"${psql[@]}" --username postgres <<-EOSQL
$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
EOSQL
echo
psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
PGUSER="${PGUSER:-postgres}" \
pg_ctl -D "$PGDATA" -m fast -w stop
echo
echo 'PostgreSQL init process complete; ready for start up.'
echo
fi
exec su - $1 -c "$@ -D $PGDATA"
postgres -D $PGDATA

View File

@ -1,12 +1,19 @@
FROM photon:2.0
RUN mkdir /harbor/ \
&& tdnf install sudo -y >> /dev/null\
RUN tdnf install sudo tzdata -y >> /dev/null \
&& tdnf clean all \
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor
COPY ./make/photon/jobservice/start.sh ./make/photon/jobservice/harbor_jobservice /harbor/
COPY ./make/photon/jobservice/harbor_jobservice /harbor/
RUN chmod u+x /harbor/harbor_jobservice
RUN chmod u+x /harbor/harbor_jobservice /harbor/start.sh
WORKDIR /harbor/
ENTRYPOINT ["/harbor/start.sh"]
USER harbor
VOLUME ["/var/log/jobs/"]
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/v1/stats || exit 1
ENTRYPOINT ["/harbor/harbor_jobservice", "-c", "/etc/jobservice/config.yml"]

View File

@ -1,6 +0,0 @@
#!/bin/sh
if [ -d /var/log/jobs ]; then
chown -R 10000:10000 /var/log/jobs/
fi
sudo -E -u \#10000 "/harbor/harbor_jobservice" "-c" "/etc/jobservice/config.yml"

View File

@ -1,8 +1,5 @@
# Rsyslog configuration file for docker.
template(name="DynaFile" type="string"
string="/var/log/docker/%syslogtag:R,ERE,0,DFLT:[^[]*--end:secpath-replace%.log"
)
#if $programname == "docker" then ?DynaFile
if $programname != "rsyslogd" then -?DynaFile
template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log")
if $programname != "rsyslogd" then {
action(type="omfile" dynaFile="DynaFile")
}

View File

@ -1,14 +1,19 @@
FROM photon:2.0
RUN tdnf install -y nginx >> /dev/null\
RUN tdnf install sudo nginx -y >> /dev/null\
&& tdnf clean all \
&& groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& tdnf clean all
&& ln -sf /dev/stderr /var/log/nginx/error.log
EXPOSE 80
VOLUME /var/cache/nginx /var/log/nginx /run
EXPOSE 8080
STOPSIGNAL SIGQUIT
HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
USER nginx
CMD ["nginx", "-g", "daemon off;"]

View File

@ -1,2 +0,0 @@
#!/bin/sh
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"

View File

@ -4,12 +4,12 @@ RUN tdnf install -y shadow sudo \
&& tdnf clean all \
&& groupadd -r -g 10000 notary \
&& useradd --no-log-init -r -g 10000 -u 10000 notary
COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
COPY ./make/photon/notary/binary/notary-server /bin/notary-server
COPY ./make/photon/notary/binary/migrate /bin/migrate
COPY ./make/photon/notary/binary/migrations/ /migrations/
COPY ./make/photon/notary/server-start.sh /bin/server-start.sh
RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/server-start.sh
RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch
ENV SERVICE_NAME=notary_server
ENTRYPOINT [ "/bin/server-start.sh" ]
USER notary
CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt

View File

@ -1,2 +0,0 @@
#!/bin/sh
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"

View File

@ -8,8 +8,8 @@ COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
COPY ./make/photon/notary/binary/notary-signer /bin/notary-signer
COPY ./make/photon/notary/binary/migrate /bin/migrate
COPY ./make/photon/notary/binary/migrations/ /migrations/
COPY ./make/photon/notary/signer-start.sh /bin/signer-start.sh
RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/signer-start.sh
RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch
ENV SERVICE_NAME=notary_signer
ENTRYPOINT [ "/bin/signer-start.sh" ]
USER notary
CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt

View File

@ -1,39 +1,44 @@
FROM node:10.15.0 as nodeportal
RUN mkdir -p /portal_src
RUN mkdir -p /build_dir
COPY make/photon/portal/entrypoint.sh /
COPY src/portal /portal_src
COPY ./docs/swagger.yaml /portal_src
COPY ./LICENSE /portal_src
WORKDIR /portal_src
WORKDIR /build_dir
RUN npm install && \
chmod u+x /entrypoint.sh
RUN /entrypoint.sh
VOLUME ["/portal_src"]
RUN cp -r /portal_src/* /build_dir \
&& ls -la \
&& apt-get update \
&& apt-get install -y --no-install-recommends python-yaml=3.12-1 \
&& python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' < swagger.yaml > swagger.json \
&& npm install \
&& npm run build_lib \
&& npm run link_lib \
&& npm run release
FROM photon:2.0
RUN tdnf install -y nginx >> /dev/null \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& tdnf clean all
EXPOSE 80
VOLUME /var/cache/nginx /var/log/nginx /run
COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html
COPY --from=nodeportal /build_dir/LICENSE /usr/share/nginx/html
COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf
RUN tdnf install -y nginx sudo >> /dev/null \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
&& chown -R nginx:nginx /etc/nginx \
&& tdnf clean all
EXPOSE 8080
VOLUME /var/cache/nginx /var/log/nginx /run
STOPSIGNAL SIGQUIT
HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
USER nginx
CMD ["nginx", "-g", "daemon off;"]

View File

@ -1,21 +0,0 @@
#!/bin/bash
set -e
cd /build_dir
cp -r /portal_src/* .
ls -la
# Update
apt-get update
apt-get install -y ruby
ruby -ryaml -rjson -e 'puts JSON.pretty_generate(YAML.load(ARGF))' swagger.yaml>swagger.json
cat ./package.json
npm install
## Build harbor-portal and link it
npm run build_lib
npm run link_lib
## Build production
npm run release

View File

@ -1,13 +1,21 @@
worker_processes 1;
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
server {
listen 80;
listen 8080;
server_name localhost;
root /usr/share/nginx/html;

View File

@ -5,13 +5,21 @@ from pathlib import Path
DEFAULT_UID = 10000
DEFAULT_GID = 10000
PG_UID = 999
PG_GID = 999
REDIS_UID = 999
REDIS_GID = 999
## Global variable
host_root_dir = '/hostfs'
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = '/config'
data_dir = '/data'
secret_dir = '/secret'
secret_key_dir='/secret/keys'
secret_key_dir = '/secret/keys'
old_private_key_pem_path = Path('/config/core/private_key.pem')
old_crt_path = Path('/config/registry/root.crt')

View File

@ -16,6 +16,7 @@ from utils.clair import prepare_clair
from utils.chart import prepare_chartmuseum
from utils.docker_compose import prepare_docker_compose
from utils.nginx import prepare_nginx, nginx_confd_dir
from utils.redis import prepare_redis
from g import (config_dir, input_config_path, private_key_pem_path, root_crt_path, secret_key_dir,
old_private_key_pem_path, old_crt_path)
@ -38,6 +39,7 @@ def main(conf, with_notary, with_clair, with_chartmuseum):
prepare_registry_ctl(config_dict)
prepare_db(config_dict)
prepare_job_service(config_dict)
prepare_redis(config_dict)
get_secret_key(secret_key_dir)

View File

@ -1,3 +1,3 @@
http_proxy={{clair_http_proxy}}
https_proxy={{clair_https_proxy}}
no_proxy={{clair_no_proxy}}
HTTP_PROXY={{clair_http_proxy}}
HTTPS_PROXY={{clair_https_proxy}}
NO_PROXY={{clair_no_proxy}}

View File

@ -17,9 +17,3 @@ clair:
timeout: 300s
updater:
interval: {{clair_updaters_interval}}h
notifier:
attempts: 3
renotifyinterval: 2h
http:
endpoint: http://core:8080/service/notifications/clair

View File

@ -15,6 +15,8 @@ POSTGRESQL_USERNAME={{harbor_db_username}}
POSTGRESQL_PASSWORD={{harbor_db_password}}
POSTGRESQL_DATABASE={{harbor_db_name}}
POSTGRESQL_SSLMODE={{harbor_db_sslmode}}
POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}}
POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}}
REGISTRY_URL={{registry_url}}
TOKEN_SERVICE_URL={{token_service_url}}
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
@ -31,6 +33,7 @@ CLAIR_DB_USERNAME={{clair_db_username}}
CLAIR_DB={{clair_db_name}}
CLAIR_DB_SSLMODE={{clair_db_sslmode}}
CORE_URL={{core_url}}
CORE_LOCAL_URL={{core_local_url}}
JOBSERVICE_URL={{jobservice_url}}
CLAIR_URL={{clair_url}}
NOTARY_URL={{notary_url}}
@ -40,3 +43,7 @@ RELOAD_KEY={{reload_key}}
CHART_REPOSITORY_URL={{chart_repository_url}}
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
WITH_CHARTMUSEUM={{with_chartmuseum}}
HTTP_PROXY={{core_http_proxy}}
HTTPS_PROXY={{core_https_proxy}}
NO_PROXY={{core_no_proxy}}

View File

@ -14,7 +14,8 @@ services:
- SETUID
volumes:
- {{log_location}}/:/var/log/docker/:z
- ./common/config/log/:/etc/logrotate.d/:z
- ./common/config/log/logrotate.conf:/etc/logrotate.d/logrotate.conf:z
- ./common/config/log/rsyslog_docker.conf:/etc/rsyslog.d/rsyslog_docker.conf:z
ports:
- 127.0.0.1:1514:10514
networks:
@ -91,6 +92,7 @@ services:
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "registryctl"
{% if external_database == False %}
postgresql:
image: goharbor/harbor-db:{{version}}
container_name: harbor-db
@ -106,16 +108,16 @@ services:
- {{data_volume}}/database:/var/lib/postgresql/data:z
networks:
harbor:
{% if with_notary %}
{% if with_notary %}
harbor-notary:
aliases:
- harbor-db
{% endif %}
{% if with_clair %}
{% endif %}
{% if with_clair %}
harbor-clair:
aliases:
- harbor-db
{% endif %}
{% endif %}
dns_search: .
env_file:
- ./common/config/db/env
@ -126,6 +128,7 @@ services:
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "postgresql"
{% endif %}
core:
image: goharbor/harbor-core:{{version}}
container_name: harbor-core
@ -175,6 +178,12 @@ services:
depends_on:
- log
- registry
{% if external_redis == False %}
- redis
{% endif %}
{% if external_database == False %}
- postgresql
{% endif %}
logging:
driver: "syslog"
options:
@ -196,7 +205,6 @@ services:
dns_search: .
depends_on:
- log
- core
logging:
driver: "syslog"
options:
@ -227,13 +235,13 @@ services:
{% endif %}
dns_search: .
depends_on:
- redis
- core
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "jobservice"
{% if external_redis == False %}
redis:
image: goharbor/redis-photon:{{redis_version}}
container_name: redis
@ -248,11 +256,11 @@ services:
- {{data_volume}}/redis:/var/lib/redis
networks:
harbor:
{% if with_chartmuseum %}
{% if with_chartmuseum %}
harbor-chartmuseum:
aliases:
- redis
{% endif %}
{% endif %}
dns_search: .
depends_on:
- log
@ -261,8 +269,9 @@ services:
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "redis"
{% endif %}
proxy:
image: goharbor/nginx-photon:{{redis_version}}
image: goharbor/nginx-photon:{{version}}
container_name: nginx
restart: always
cap_drop:
@ -275,12 +284,7 @@ services:
volumes:
- ./common/config/nginx:/etc/nginx:z
{% if protocol == 'https' %}
- type: bind
source: {{cert_key_path}}
target: /etc/cert/server.key
- type: bind
source: {{cert_path}}
target: /etc/cert/server.crt
- {{data_volume}}/secret/cert:/etc/cert:z
{% endif %}
networks:
- harbor
@ -289,15 +293,14 @@ services:
{% endif %}
dns_search: .
ports:
- {{http_port}}:80
- {{http_port}}:8080
{% if protocol == 'https' %}
- {{https_port}}:443
- {{https_port}}:8443
{% endif %}
{% if with_notary %}
- 4443:4443
{% endif %}
depends_on:
- postgresql
- registry
- core
- portal
@ -327,7 +330,9 @@ services:
env_file:
- ./common/config/notary/server_env
depends_on:
{% if external_database == False %}
- postgresql
{% endif %}
- notary-signer
logging:
driver: "syslog"
@ -355,7 +360,10 @@ services:
env_file:
- ./common/config/notary/signer_env
depends_on:
- log
{% if external_database == False %}
- postgresql
{% endif %}
logging:
driver: "syslog"
options:
@ -378,16 +386,19 @@ services:
cpu_quota: 50000
dns_search: .
depends_on:
- log
{% if external_database == False %}
- postgresql
{% endif %}
volumes:
- type: bind
source: ./common/config/clair/config.yaml
target: /etc/clair/config.yaml
{%if registry_custom_ca_bundle_path %}
{%if registry_custom_ca_bundle_path %}
- type: bind
source: {{registry_custom_ca_bundle_path}}
target: /harbor_cust_cert/custom-ca-bundle.crt
{% endif %}
{% endif %}
logging:
driver: "syslog"
options:
@ -412,14 +423,14 @@ services:
- harbor-chartmuseum
dns_search: .
depends_on:
- redis
- log
volumes:
- {{data_volume}}/chart_storage:/chart_storage:z
- ./common/config/chartserver:/etc/chartserver:z
{% if gcs_keyfile %}
- type: bind
source: {{gcs_keyfile}}
target: /etc/registry/gcs.key
target: /etc/chartserver/gcs.key
{% endif %}
{%if registry_custom_ca_bundle_path %}
- type: bind

View File

@ -20,6 +20,7 @@ worker_pool:
#redis://[arbitrary_username:password@]ipaddress:port/database_index
redis_url: {{redis_url}}
namespace: "harbor_job_service_namespace"
idle_timeout_second: 3600
#Loggers for the running job
job_loggers:
- name: "STD_OUTPUT" # logger backend name, only support "FILE" and "STD_OUTPUT"

View File

@ -1,3 +1,8 @@
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
CORE_URL={{core_url}}
JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}}
HTTP_PROXY={{jobservice_http_proxy}}
HTTPS_PROXY={{jobservice_https_proxy}}
NO_PROXY={{jobservice_no_proxy}}

View File

@ -0,0 +1,11 @@
# Rsyslog configuration file for docker.
template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log")
if $programname != "rsyslogd" then {
{%if log_external %}
action(type="omfwd" Target="{{log_ep_host}}" Port="{{log_ep_port}}" Protocol="{{log_ep_protocol}}" Template="RSYSLOG_SyslogProtocol23Format")
{% else %}
action(type="omfile" dynaFile="DynaFile")
{% endif %}
}

View File

@ -1,4 +1,5 @@
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
@ -7,6 +8,11 @@ events {
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
@ -17,7 +23,7 @@ http {
}
upstream portal {
server portal:80;
server portal:8080;
}
log_format timed_combined '$remote_addr - '
@ -28,7 +34,7 @@ http {
access_log /dev/stdout timed_combined;
server {
listen 80;
listen 8080;
server_tokens off;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
@ -117,7 +123,7 @@ http {
proxy_request_buffering off;
}
location /service/notifications {
location /service/notifications {
return 404;
}
}

View File

@ -1,4 +1,5 @@
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
@ -7,6 +8,11 @@ events {
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
include /etc/nginx/conf.d/*.upstream.conf;
@ -18,7 +24,7 @@ http {
}
upstream portal {
server portal:80;
server portal:8080;
}
log_format timed_combined '$remote_addr - '
@ -31,7 +37,7 @@ http {
include /etc/nginx/conf.d/*.server.conf;
server {
listen 443 ssl;
listen 8443 ssl;
# server_name harbordomain.com;
server_tokens off;
# SSL
@ -137,12 +143,12 @@ http {
proxy_request_buffering off;
}
location /service/notifications {
location /service/notifications {
return 404;
}
}
server {
listen 80;
server {
listen 8080;
#server_name harbordomain.com;
return 308 https://$host$request_uri;
}

View File

@ -4,8 +4,11 @@ from pathlib import Path
from subprocess import DEVNULL
from functools import wraps
from .misc import mark_file
from .misc import generate_random_string
from g import DEFAULT_GID, DEFAULT_UID
from .misc import (
mark_file,
generate_random_string,
check_permission)
SSL_CERT_PATH = os.path.join("/etc/cert", "server.crt")
SSL_CERT_KEY_PATH = os.path.join("/etc/cert", "server.key")
@ -102,3 +105,9 @@ def prepare_ca(
else:
shutil.move(old_crt_path, root_crt_path)
shutil.move(old_private_key_pem_path, private_key_pem_path)
if not check_permission(root_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID):
os.chown(root_crt_path, DEFAULT_UID, DEFAULT_GID)
if not check_permission(private_key_pem_path, uid=DEFAULT_UID, gid=DEFAULT_GID):
os.chown(private_key_pem_path, DEFAULT_UID, DEFAULT_GID)

View File

@ -1,17 +1,19 @@
import os, shutil
from g import templates_dir, config_dir
from g import templates_dir, config_dir, data_dir, DEFAULT_UID, DEFAULT_GID
from .jinja import render_jinja
from .misc import prepare_dir
chartm_temp_dir = os.path.join(templates_dir, "chartserver")
chartm_env_temp = os.path.join(chartm_temp_dir, "env.jinja")
chart_museum_temp_dir = os.path.join(templates_dir, "chartserver")
chart_museum_env_temp = os.path.join(chart_museum_temp_dir, "env.jinja")
chartm_config_dir = os.path.join(config_dir, "chartserver")
chartm_env = os.path.join(config_dir, "chartserver", "env")
chart_museum_config_dir = os.path.join(config_dir, "chartserver")
chart_museum_env = os.path.join(config_dir, "chartserver", "env")
chart_museum_data_dir = os.path.join(data_dir, 'chart_storage')
def prepare_chartmuseum(config_dict):
core_secret = config_dict['core_secret']
redis_host = config_dict['redis_host']
redis_port = config_dict['redis_port']
redis_password = config_dict['redis_password']
@ -19,9 +21,8 @@ def prepare_chartmuseum(config_dict):
storage_provider_name = config_dict['storage_provider_name']
storage_provider_config_map = config_dict['storage_provider_config']
if not os.path.isdir(chartm_config_dir):
print ("Create config folder: %s" % chartm_config_dir)
os.makedirs(chartm_config_dir)
prepare_dir(chart_museum_data_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
prepare_dir(chart_museum_config_dir)
# process redis info
cache_store = "redis"
@ -94,8 +95,8 @@ def prepare_chartmuseum(config_dict):
all_storage_provider_configs = ('\n').join(storage_provider_config_options)
render_jinja(
chartm_env_temp,
chartm_env,
chart_museum_env_temp,
chart_museum_env,
cache_store=cache_store,
cache_redis_addr=cache_redis_addr,
cache_redis_password=cache_redis_password,

View File

@ -2,12 +2,12 @@ import os, shutil
from g import templates_dir, config_dir, DEFAULT_UID, DEFAULT_GID
from .jinja import render_jinja
from .misc import prepare_config_dir
from .misc import prepare_dir
clair_template_dir = os.path.join(templates_dir, "clair")
def prepare_clair(config_dict):
clair_config_dir = prepare_config_dir(config_dir, "clair")
clair_config_dir = prepare_dir(config_dir, "clair")
if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
print("Copying offline data file for clair DB")

View File

@ -2,6 +2,9 @@ import yaml
from g import versions_file_path
from .misc import generate_random_string
default_db_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns
default_db_max_open_conns = 0 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns
def validate(conf, **kwargs):
protocol = conf.get("protocol")
if protocol != "https" and kwargs.get('notary_mode'):
@ -13,6 +16,14 @@ def validate(conf, **kwargs):
if not conf.get("cert_key_path"):
raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
# log endpoint validate
if ('log_ep_host' in conf) and not conf['log_ep_host']:
raise Exception('Error: must set log endpoint host to enable external host')
if ('log_ep_port' in conf) and not conf['log_ep_port']:
raise Exception('Error: must set log endpoint port to enable external host')
if ('log_ep_protocol' in conf) and (conf['log_ep_protocol'] not in ['udp', 'tcp']):
raise Exception("Protocol in external log endpoint must be one of 'udp' or 'tcp' ")
# Storage validate
valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
storage_provider_name = conf.get("storage_provider_name")
@ -59,6 +70,7 @@ def parse_yaml_config(config_file_path):
'registry_url': "http://registry:5000",
'registry_controller_url': "http://registryctl:8080",
'core_url': "http://core:8080",
'core_local_url': "http://127.0.0.1:8080",
'token_service_url': "http://core:8080/service/token",
'jobservice_url': 'http://jobservice:8080',
'clair_url': 'http://clair:6060',
@ -103,6 +115,8 @@ def parse_yaml_config(config_file_path):
config_dict['harbor_db_username'] = 'postgres'
config_dict['harbor_db_password'] = db_configs.get("password") or ''
config_dict['harbor_db_sslmode'] = 'disable'
config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_db_max_idle_conns
config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_db_max_open_conns
# clair db
config_dict['clair_db_host'] = 'postgresql'
config_dict['clair_db_port'] = 5432
@ -162,13 +176,18 @@ def parse_yaml_config(config_file_path):
if storage_config.get('redirect'):
config_dict['storage_redirect_disabled'] = storage_config['redirect']['disabled']
# Global proxy configs
proxy_config = configs.get('proxy') or {}
proxy_components = proxy_config.get('components') or []
for proxy_component in proxy_components:
config_dict[proxy_component + '_http_proxy'] = proxy_config.get('http_proxy') or ''
config_dict[proxy_component + '_https_proxy'] = proxy_config.get('https_proxy') or ''
config_dict[proxy_component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# Clair configs, optional
clair_configs = configs.get("clair") or {}
config_dict['clair_db'] = 'postgres'
config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12
config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or ''
config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or ''
config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# Chart configs
chart_configs = configs.get("chart") or {}
@ -179,22 +198,39 @@ def parse_yaml_config(config_file_path):
config_dict['max_job_workers'] = js_config["max_job_workers"]
config_dict['jobservice_secret'] = generate_random_string(16)
# notification config
notification_config = configs.get('notification') or {}
config_dict['notification_webhook_job_max_retry'] = notification_config["webhook_job_max_retry"]
# Log configs
allowed_levels = ['debug', 'info', 'warning', 'error', 'fatal']
log_configs = configs.get('log') or {}
config_dict['log_location'] = log_configs["location"]
config_dict['log_rotate_count'] = log_configs["rotate_count"]
config_dict['log_rotate_size'] = log_configs["rotate_size"]
log_level = log_configs['level']
if log_level not in allowed_levels:
raise Exception('log level must be one of debug, info, warning, error, fatal')
config_dict['log_level'] = log_level.lower()
# parse local log related configs
local_logs = log_configs.get('local') or {}
if local_logs:
config_dict['log_location'] = local_logs.get('location') or '/var/log/harbor'
config_dict['log_rotate_count'] = local_logs.get('rotate_count') or 50
config_dict['log_rotate_size'] = local_logs.get('rotate_size') or '200M'
# parse external log endpoint related configs
if log_configs.get('external_endpoint'):
config_dict['log_external'] = True
config_dict['log_ep_protocol'] = log_configs['external_endpoint']['protocol']
config_dict['log_ep_host'] = log_configs['external_endpoint']['host']
config_dict['log_ep_port'] = log_configs['external_endpoint']['port']
else:
config_dict['log_external'] = False
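For reference, a harbor.yml `log` section in the new layout this code accepts, shown as the dict it receives after YAML loading (host and port are illustrative):

log_configs = {
    'level': 'info',
    'local': {
        'location': '/var/log/harbor',
        'rotate_count': 50,
        'rotate_size': '200M',
    },
    # Optional: when present, log_external becomes True and logs are
    # forwarded to this endpoint instead of being written locally.
    'external_endpoint': {
        'protocol': 'tcp',             # must be 'udp' or 'tcp' per validate()
        'host': 'syslog.example.com',  # illustrative
        'port': 514,
    },
}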
# external DB, optional, if external_db enabled, it will cover the database config
external_db_configs = configs.get('external_database') or {}
if external_db_configs:
config_dict['external_database'] = True
# harbor db
config_dict['harbor_db_host'] = external_db_configs['harbor']['host']
config_dict['harbor_db_port'] = external_db_configs['harbor']['port']
@ -202,7 +238,9 @@ def parse_yaml_config(config_file_path):
config_dict['harbor_db_username'] = external_db_configs['harbor']['username']
config_dict['harbor_db_password'] = external_db_configs['harbor']['password']
config_dict['harbor_db_sslmode'] = external_db_configs['harbor']['ssl_mode']
# clari db
config_dict['harbor_db_max_idle_conns'] = external_db_configs['harbor'].get("max_idle_conns") or default_db_max_idle_conns
config_dict['harbor_db_max_open_conns'] = external_db_configs['harbor'].get("max_open_conns") or default_db_max_open_conns
# clair db
config_dict['clair_db_host'] = external_db_configs['clair']['host']
config_dict['clair_db_port'] = external_db_configs['clair']['port']
config_dict['clair_db_name'] = external_db_configs['clair']['db_name']
@ -223,11 +261,14 @@ def parse_yaml_config(config_file_path):
config_dict['notary_server_db_username'] = external_db_configs['notary_server']['username']
config_dict['notary_server_db_password'] = external_db_configs['notary_server']['password']
config_dict['notary_server_db_sslmode'] = external_db_configs['notary_server']['ssl_mode']
else:
config_dict['external_database'] = False
# redis config
redis_configs = configs.get("external_redis")
if redis_configs:
config_dict['external_redis'] = True
# using external_redis
config_dict['redis_host'] = redis_configs['host']
config_dict['redis_port'] = redis_configs['port']
@ -236,6 +277,7 @@ def parse_yaml_config(config_file_path):
config_dict['redis_db_index_js'] = redis_configs.get('jobservice_db_index') or 2
config_dict['redis_db_index_chart'] = redis_configs.get('chartmuseum_db_index') or 3
else:
config_dict['external_redis'] = False
## Using local redis
config_dict['redis_host'] = 'redis'
config_dict['redis_port'] = 6379

View File

@ -1,7 +1,7 @@
import shutil, os
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir, generate_random_string
from g import config_dir, templates_dir, data_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_dir, generate_random_string
from utils.jinja import render_jinja
core_config_dir = os.path.join(config_dir, "core", "certificates")
@ -10,8 +10,14 @@ core_conf_env = os.path.join(config_dir, "core", "env")
core_conf_template_path = os.path.join(templates_dir, "core", "app.conf.jinja")
core_conf = os.path.join(config_dir, "core", "app.conf")
ca_download_dir = os.path.join(data_dir, 'ca_download')
psc_dir = os.path.join(data_dir, 'psc')
def prepare_core(config_dict, with_notary, with_clair, with_chartmuseum):
prepare_core_config_dir()
prepare_dir(psc_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
prepare_dir(ca_download_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
prepare_dir(core_config_dir)
# Render Core
# set cache for chart repo server
# default set 'memory' mode, if redis is configured then set to 'redis'
@ -32,8 +38,6 @@ def prepare_core(config_dict, with_notary, with_clair, with_chartmuseum):
# Copy Core app.conf
copy_core_config(core_conf_template_path, core_conf)
def prepare_core_config_dir():
prepare_config_dir(core_config_dir)
def copy_core_config(core_templates_path, core_config_path):
shutil.copyfile(core_templates_path, core_config_path)

View File

@ -1,20 +1,18 @@
import os
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from g import config_dir, templates_dir, data_dir, PG_UID, PG_GID
from utils.misc import prepare_dir
from utils.jinja import render_jinja
db_config_dir = os.path.join(config_dir, "db")
db_env_template_path = os.path.join(templates_dir, "db", "env.jinja")
db_conf_env = os.path.join(config_dir, "db", "env")
database_data_path = os.path.join(data_dir, 'database')
def prepare_db(config_dict):
prepare_db_config_dir()
prepare_dir(database_data_path, uid=PG_UID, gid=PG_GID, mode=0o700)
prepare_dir(db_config_dir)
render_jinja(
db_env_template_path,
db_conf_env,
harbor_db_password=config_dict['harbor_db_password'])
def prepare_db_config_dir():
prepare_config_dir(db_config_dir)

View File

@ -13,8 +13,8 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
VERSION_TAG = versions.get('VERSION_TAG') or 'dev'
REGISTRY_VERSION = versions.get('REGISTRY_VERSION') or 'v2.7.1'
NOTARY_VERSION = versions.get('NOTARY_VERSION') or 'v0.6.1'
CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.7'
CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.8.1'
CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.9'
CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.9.0'
rendering_variables = {
'version': VERSION_TAG,
@ -28,22 +28,32 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
'protocol': configs['protocol'],
'http_port': configs['http_port'],
'registry_custom_ca_bundle_path': configs['registry_custom_ca_bundle_path'],
'external_redis': configs['external_redis'],
'external_database': configs['external_database'],
'with_notary': with_notary,
'with_clair': with_clair,
'with_chartmuseum': with_chartmuseum
}
# for gcs
storage_config = configs.get('storage_provider_config') or {}
if storage_config.get('keyfile') and configs['storage_provider_name'] == 'gcs':
rendering_variables['gcs_keyfile'] = storage_config['keyfile']
# for http
if configs['protocol'] == 'https':
rendering_variables['cert_key_path'] = configs['cert_key_path']
rendering_variables['cert_path'] = configs['cert_path']
rendering_variables['https_port'] = configs['https_port']
# for uaa
uaa_config = configs.get('uaa') or {}
if uaa_config.get('ca_file'):
rendering_variables['uaa_ca_file'] = uaa_config['ca_file']
render_jinja(docker_compose_template_path, docker_compose_yml_path, **rendering_variables)
# for log
log_ep_host = configs.get('log_ep_host')
if log_ep_host:
rendering_variables['external_log_endpoint'] = True
render_jinja(docker_compose_template_path, docker_compose_yml_path, mode=0o644, **rendering_variables)

View File

@ -1,7 +1,7 @@
from jinja2 import Environment, FileSystemLoader
from .misc import mark_file
jinja_env = Environment(loader=FileSystemLoader('/'), trim_blocks=True)
jinja_env = Environment(loader=FileSystemLoader('/'), trim_blocks=True, lstrip_blocks=True)
def render_jinja(src, dest,mode=0o640, uid=0, gid=0, **kw):
t = jinja_env.get_template(src)
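Adding lstrip_blocks matters for the compose template changes above: the `{% if %}` tags are now indented to match the surrounding YAML, and without lstrip_blocks that indentation would leak into the rendered file. A self-contained sketch:

from jinja2 import Environment, DictLoader

template = "services:\n  {% if with_clair %}\n  clair: {}\n  {% endif %}\n"
env = Environment(loader=DictLoader({'t': template}),
                  trim_blocks=True, lstrip_blocks=True)
# The indented tags vanish cleanly instead of leaving blank, indented lines:
print(env.get_template('t').render(with_clair=True))
# services:
#   clair: {}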

View File

@ -1,7 +1,7 @@
import os
from g import config_dir, DEFAULT_GID, DEFAULT_UID, templates_dir
from utils.misc import prepare_config_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
job_config_dir = os.path.join(config_dir, "jobservice")
@ -10,15 +10,14 @@ job_service_conf_env = os.path.join(config_dir, "jobservice", "env")
job_service_conf_template_path = os.path.join(templates_dir, "jobservice", "config.yml.jinja")
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
def prepare_job_service(config_dict):
prepare_config_dir(job_config_dir)
prepare_dir(job_config_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
log_level = config_dict['log_level'].upper()
# Job log is stored in data dir
job_log_dir = os.path.join('/data', "job_logs")
prepare_config_dir(job_log_dir)
prepare_dir(job_log_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
# Render Jobservice env
render_jinja(
job_service_env_template_path,

View File

@ -1,15 +1,21 @@
import os
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
log_config_dir = os.path.join(config_dir, "log")
# logrotate config file
logrotate_template_path = os.path.join(templates_dir, "log", "logrotate.conf.jinja")
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
# syslog docker config file
log_syslog_docker_template_path = os.path.join(templates_dir, 'log', 'rsyslog_docker.conf.jinja')
log_syslog_docker_config = os.path.join(config_dir, 'log', 'rsyslog_docker.conf')
def prepare_log_configs(config_dict):
prepare_config_dir(log_config_dir)
prepare_dir(log_config_dir)
# Render Log config
render_jinja(
@ -18,3 +24,12 @@ def prepare_log_configs(config_dict):
uid=DEFAULT_UID,
gid=DEFAULT_GID,
**config_dict)
# Render syslog docker config
render_jinja(
log_syslog_docker_template_path,
log_syslog_docker_config,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
**config_dict
)

View File

@ -1,6 +1,7 @@
import os
import string
import random
from pathlib import Path
from g import DEFAULT_UID, DEFAULT_GID
@ -78,11 +79,33 @@ def generate_random_string(length):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def prepare_config_dir(root, *name):
absolute_path = os.path.join(root, *name)
if not os.path.exists(absolute_path):
os.makedirs(absolute_path)
return absolute_path
def prepare_dir(root: str, *args, **kwargs) -> str:
gid, uid = kwargs.get('gid'), kwargs.get('uid')
absolute_path = Path(os.path.join(root, *args))
if absolute_path.is_file():
raise Exception('Path exists and the type is regular file')
mode = kwargs.get('mode') or 0o755
# we need to make sure this dir has the right permissions
if not absolute_path.exists():
absolute_path.mkdir(mode=mode, parents=True)
elif not check_permission(absolute_path, mode=mode):
absolute_path.chmod(mode)
# if uid or gid is not None, change the ownership of this dir
if not(gid is None and uid is None):
dir_uid, dir_gid = absolute_path.stat().st_uid, absolute_path.stat().st_gid
if uid is None:
uid = dir_uid
if gid is None:
gid = dir_gid
# We chown recursively only when the dir is not owned by the correct user,
# to save time when the dir is extremely large
if not check_permission(absolute_path, uid, gid):
recursive_chown(absolute_path, uid, gid)
return str(absolute_path)
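# Usage example (mirrors utils/redis.py later in this diff; uid/gid 999 are
# the REDIS_UID/REDIS_GID values defined in g.py above):
#   prepare_dir('/data', 'redis', uid=999, gid=999)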
def delfile(src):
@ -93,6 +116,27 @@ def delfile(src):
except Exception as e:
print(e)
elif os.path.isdir(src):
for item in os.listdir(src):
itemsrc = os.path.join(src, item)
delfile(itemsrc)
for dir_name in os.listdir(src):
dir_path = os.path.join(src, dir_name)
delfile(dir_path)
def recursive_chown(path, uid, gid):
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
def check_permission(path: str, uid:int = None, gid:int = None, mode:int = None):
if not isinstance(path, Path):
path = Path(path)
if uid is not None and uid != path.stat().st_uid:
return False
if gid is not None and gid != path.stat().st_gid:
return False
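# Note: st_mode also carries the file-type bits; taking the difference
# modulo 0o1000 compares only the low rwx permission bits of the two modes.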
if mode is not None and (path.stat().st_mode - mode) % 0o1000 != 0:
return False
return True

View File

@ -2,11 +2,13 @@ import os, shutil
from fnmatch import fnmatch
from pathlib import Path
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir, mark_file
from g import config_dir, templates_dir, host_root_dir, DEFAULT_GID, DEFAULT_UID, data_dir
from utils.misc import prepare_dir, mark_file
from utils.jinja import render_jinja
from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH
host_ngx_real_cert_dir = Path(os.path.join(data_dir, 'secret', 'cert'))
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d")
nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja")
@ -17,44 +19,76 @@ CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS = 'harbor.https.*.conf'
CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP = 'harbor.http.*.conf'
def prepare_nginx(config_dict):
prepare_config_dir(nginx_confd_dir)
prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
render_nginx_template(config_dict)
def prepare_nginx_certs(cert_key_path, cert_path):
"""
Prepare the cert files with proper ownership
1. Remove nginx cert files in secret dir
2. Copy cert files on host filesystem to secret dir
3. Change the permission to 644 and ownership to 10000:10000
"""
host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/')))
host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/')))
if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir():
shutil.rmtree(host_ngx_real_cert_dir)
os.makedirs(host_ngx_real_cert_dir, mode=0o755)
real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key')
real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt')
shutil.copy2(host_ngx_cert_key_path, real_key_path)
shutil.copy2(host_ngx_cert_path, real_crt_path)
os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
def render_nginx_template(config_dict):
if config_dict['protocol'] == "https":
render_jinja(nginx_https_conf_template, nginx_conf,
"""
1. render the nginx config file according to the protocol
2. copy additional location configs into the conf.d dir
"""
if config_dict['protocol'] == 'https':
prepare_nginx_certs(config_dict['cert_key_path'], config_dict['cert_path'])
render_jinja(
nginx_https_conf_template,
nginx_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
ssl_cert=SSL_CERT_PATH,
ssl_cert_key=SSL_CERT_KEY_PATH)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
cert_dir = Path(os.path.join(config_dir, 'cert'))
ssl_key_path = Path(os.path.join(cert_dir, 'server.key'))
ssl_crt_path = Path(os.path.join(cert_dir, 'server.crt'))
cert_dir.mkdir(parents=True, exist_ok=True)
ssl_key_path.touch()
ssl_crt_path.touch()
else:
render_jinja(
nginx_http_conf_template,
nginx_conf)
nginx_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)
def add_additional_location_config(src, dst):
"""
These conf files are used by users who want to add additional customized locations to the harbor proxy
:param src: source of the file
:param dst: destination file path
"""
if not os.path.isfile(src):
return
print("Copying nginx configuration file {src} to {dst}".format(
src=src, dst=dst))
shutil.copy2(src, dst)
mark_file(dst, mode=0o644)
def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern):
if not os.path.exists(src_config_dir):
return
def add_additional_location_config(src, dst):
"""
These conf files are used by users who want to add additional customized locations to the harbor proxy
:param src: source of the file
:param dst: destination file path
"""
if not os.path.isfile(src):
return
print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
shutil.copy2(src, dst)
mark_file(dst, mode=0o644)
map(lambda filename: add_additional_location_config(
os.path.join(src_config_dir, filename),
os.path.join(dst_config_dir, filename)),

View File

@ -2,7 +2,7 @@ import os, shutil, pathlib
from g import templates_dir, config_dir, root_crt_path, secret_key_dir,DEFAULT_UID, DEFAULT_GID
from .cert import openssl_installed, create_cert, create_root_cert, get_alias
from .jinja import render_jinja
from .misc import mark_file, prepare_config_dir
from .misc import mark_file, prepare_dir
notary_template_dir = os.path.join(templates_dir, "notary")
notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja")
@ -20,12 +20,12 @@ notary_server_env_path = os.path.join(notary_config_dir, "server_env")
def prepare_env_notary(nginx_config_dir):
notary_config_dir = prepare_config_dir(config_dir, "notary")
notary_config_dir = prepare_dir(config_dir, "notary")
old_signer_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.crt'))
old_signer_key_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.key'))
old_signer_ca_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer-ca.crt'))
notary_secret_dir = prepare_config_dir('/secret/notary')
notary_secret_dir = prepare_dir('/secret/notary')
signer_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.crt'))
signer_key_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.key'))
signer_ca_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer-ca.crt'))
@ -72,9 +72,12 @@ def prepare_env_notary(nginx_config_dir):
print("Copying nginx configuration file for notary")
shutil.copy2(
render_jinja(
os.path.join(templates_dir, "nginx", "notary.upstream.conf.jinja"),
os.path.join(nginx_config_dir, "notary.upstream.conf"))
os.path.join(nginx_config_dir, "notary.upstream.conf"),
gid=DEFAULT_GID,
uid=DEFAULT_UID)
mark_file(os.path.join(notary_secret_dir, "notary-signer.crt"))
mark_file(os.path.join(notary_secret_dir, "notary-signer.key"))
@ -88,6 +91,8 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa
render_jinja(
notary_server_nginx_config_template,
os.path.join(nginx_config_dir, "notary.server.conf"),
gid=DEFAULT_GID,
uid=DEFAULT_UID,
ssl_cert=ssl_cert_path,
ssl_cert_key=ssl_cert_key_path)

View File

@ -0,0 +1,9 @@
import os
from g import data_dir, REDIS_UID, REDIS_GID
from utils.misc import prepare_dir
redis_data_path = os.path.join(data_dir, 'redis')
def prepare_redis(config_dict):
prepare_dir(redis_data_path, uid=REDIS_UID, gid=REDIS_GID)

View File

@ -1,17 +1,26 @@
import os, copy
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID, data_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
registry_config_dir = os.path.join(config_dir, "registry")
registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
registry_conf = os.path.join(config_dir, "registry", "config.yml")
registry_data_dir = os.path.join(data_dir, 'registry')
levels_map = {
'debug': 'debug',
'info': 'info',
'warning': 'warn',
'error': 'error',
'fatal': 'fatal'
}
def prepare_registry(config_dict):
prepare_config_dir(registry_config_dir)
prepare_dir(registry_data_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
prepare_dir(registry_config_dir)
storage_provider_info = get_storage_provider_info(
config_dict['storage_provider_name'],
@ -22,6 +31,7 @@ def prepare_registry(config_dict):
registry_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
level=levels_map[config_dict['log_level']],
storage_provider_info=storage_provider_info,
**config_dict)

View File

@ -1,7 +1,7 @@
import os, shutil
from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from utils.misc import prepare_dir
from utils.jinja import render_jinja
registryctl_config_dir = os.path.join(config_dir, "registryctl")
@ -24,7 +24,7 @@ def prepare_registry_ctl(config_dict):
copy_registry_ctl_conf(registryctl_config_template_path, registryctl_conf)
def prepare_registry_ctl_config_dir():
prepare_config_dir(registryctl_config_dir)
prepare_dir(registryctl_config_dir)
def copy_registry_ctl_conf(src, dst):
shutil.copyfile(src, dst)

View File

@ -4,11 +4,12 @@ RUN tdnf install -y redis sudo
VOLUME /var/lib/redis
WORKDIR /var/lib/redis
COPY ./make/photon/redis/docker-entrypoint.sh /usr/bin/
COPY ./make/photon/redis/docker-healthcheck /usr/bin/
COPY ./make/photon/redis/redis.conf /etc/redis.conf
RUN chmod +x /usr/bin/docker-entrypoint.sh \
RUN chmod +x /usr/bin/docker-healthcheck \
&& chown redis:redis /etc/redis.conf
ENTRYPOINT ["docker-entrypoint.sh"]
HEALTHCHECK CMD ["docker-healthcheck"]
USER redis
EXPOSE 6379
CMD ["redis-server", "/etc/redis.conf"]

View File

@ -1,13 +0,0 @@
#!/bin/sh
set -e
if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
set -- redis-server "$@"
fi
if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then
chown -R redis .
exec sudo -u redis "$@"
fi
exec "$@"

View File

@ -0,0 +1,9 @@
#!/bin/bash
set -eo pipefail
if ping="$(redis-cli -h "127.0.0.1" ping)" && [ "$ping" = 'PONG' ]; then
exit 0
fi
exit 1

View File

@ -1,8 +1,8 @@
#!/bin/bash
set +e
# If compling source code this dir is harbor's make dir
# If install harbor via pacakge, this dir is harbor's root dir
# If compiling source code this dir is harbor's make dir.
# If installing harbor via package, this dir is harbor's root dir.
if [[ -n "$HARBOR_BUNDLE_DIR" ]]; then
harbor_prepare_path=$HARBOR_BUNDLE_DIR
else
@ -45,10 +45,12 @@ secret_dir=${data_path}/secret
config_dir=$harbor_prepare_path/common/config
# Run prepare script
docker run --rm -v $input_dir:/input \
-v $harbor_prepare_path:/compose_location \
-v $config_dir:/config \
-v $secret_dir:/secret \
docker run --rm -v $input_dir:/input:z \
-v $data_path:/data:z \
-v $harbor_prepare_path:/compose_location:z \
-v $config_dir:/config:z \
-v $secret_dir:/secret:z \
-v /:/hostfs:z \
goharbor/prepare:dev $@
echo "Clean up the input dir"

802
src/Gopkg.lock generated
View File

@ -1,802 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:b16fbfbcc20645cb419f78325bb2e85ec729b338e996a228124d68931a6f2a37"
name = "github.com/BurntSushi/toml"
packages = ["."]
pruneopts = "UT"
revision = "b26d9c308763d68093482582cea63d69be07a0f0"
version = "v0.3.0"
[[projects]]
digest = "1:5d3e23515e7916c152cc665eda0f7eaf6fdf8fdfe7c3dbac97049bcbd649b33f"
name = "github.com/Knetic/govaluate"
packages = ["."]
pruneopts = "UT"
revision = "d216395917cc49052c7c7094cf57f09657ca08a8"
version = "v3.0.0"
[[projects]]
digest = "1:55388fd080150b9a072912f97b1f5891eb0b50df43401f8b75fb4273d3fec9fc"
name = "github.com/Masterminds/semver"
packages = ["."]
pruneopts = "UT"
revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
version = "v1.4.2"
[[projects]]
digest = "1:e8078e5f9d84e87745efb3c0961e78045500cda10d7102fdf839fbac4b49a423"
name = "github.com/Unknwon/goconfig"
packages = ["."]
pruneopts = "UT"
revision = "5f601ca6ef4d5cea8d52be2f8b3a420ee4b574a5"
[[projects]]
branch = "master"
digest = "1:47ea4fbe2ab4aeb9808502c51e657041c2e49b36b83fc1c1a349135cdf16342f"
name = "github.com/agl/ed25519"
packages = [
".",
"edwards25519",
]
pruneopts = "UT"
revision = "5312a61534124124185d41f09206b9fef1d88403"
[[projects]]
digest = "1:d2dbd0b0ec5373e89b27d0dd9f59793aa47020a05805b4b75c63aa1b2406781b"
name = "github.com/astaxie/beego"
packages = [
".",
"cache",
"cache/redis",
"config",
"context",
"context/param",
"grace",
"logs",
"orm",
"session",
"session/redis",
"toolbox",
"utils",
"validation",
]
pruneopts = "UT"
revision = "d96289a81bf67728cff7a19b067aaecc65a62ec6"
version = "v1.9.0"
[[projects]]
digest = "1:4522bd966f53adb3da34201b39df1153534e441c8067d5e674964f05ecca3a71"
name = "github.com/beego/i18n"
packages = ["."]
pruneopts = "UT"
revision = "e87155e8f0c05bf323d0b13470e1b97af0cb5652"
[[projects]]
digest = "1:2aaf2cc045d0219bba79655e4df795b973168c310574669cb75786684f7287d3"
name = "github.com/bmatcuk/doublestar"
packages = ["."]
pruneopts = "UT"
revision = "85a78806aa1b4707d1dbace9be592cf1ece91ab3"
version = "v1.1.1"
[[projects]]
digest = "1:76ca0dfcbf951d1868c7449453981dba9e1f79034706d1500a5a785000f5f222"
name = "github.com/casbin/casbin"
packages = [
".",
"config",
"effect",
"log",
"model",
"persist",
"persist/file-adapter",
"rbac",
"rbac/default-role-manager",
"util",
]
pruneopts = "UT"
revision = "542e16cac74562eefac970a7d0d1467640d1f1cb"
version = "v1.7.0"
[[projects]]
digest = "1:f6e5e1bc64c2908167e6aa9a1fe0c084d515132a1c63ad5b6c84036aa06dc0c1"
name = "github.com/coreos/go-oidc"
packages = ["."]
pruneopts = "UT"
revision = "1180514eaf4d9f38d0d19eef639a1d695e066e72"
version = "v2.0.0"
[[projects]]
digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
digest = "1:ace1aef6acdf2c4647365dc87c14fb8b71ed8bb0b3ae114ffb216614a24da219"
name = "github.com/dghubble/sling"
packages = ["."]
pruneopts = "UT"
revision = "eb56e89ac5088bebb12eef3cb4b293300f43608b"
version = "v1.1.0"
[[projects]]
digest = "1:d912bf9afc98bbb6539ea99c9ac3e83119853310dd1a3aec1583d76f340ece27"
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
pruneopts = "UT"
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
version = "v3.0.0"
[[projects]]
digest = "1:d06c54bbda3a04ec18a2fa0577896b3c40f13409639b442379ee0a5a53be8259"
name = "github.com/docker/distribution"
packages = [
".",
"context",
"digestset",
"health",
"manifest",
"manifest/manifestlist",
"manifest/schema1",
"manifest/schema2",
"reference",
"registry/api/errcode",
"registry/auth",
"registry/auth/token",
"registry/client/auth/challenge",
"uuid",
]
pruneopts = "UT"
revision = "2461543d988979529609e8cb6fca9ca190dc48da"
version = "v2.7.1"
[[projects]]
branch = "master"
digest = "1:72ba344e60095ac4fe0eac56f56fe95644421670b808238a1c849ea92721037e"
name = "github.com/docker/go"
packages = ["canonical/json"]
pruneopts = "UT"
revision = "d30aec9fd63c35133f8f79c3412ad91a3b08be06"
[[projects]]
branch = "master"
digest = "1:4841e14252a2cecf11840bd05230412ad469709bbacfc12467e2ce5ad07f339b"
name = "github.com/docker/libtrust"
packages = ["."]
pruneopts = "UT"
revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
[[projects]]
digest = "1:0594af97b2f4cec6554086eeace6597e20a4b69466eb4ada25adf9f4300dddd2"
name = "github.com/garyburd/redigo"
packages = [
"internal",
"redis",
]
pruneopts = "UT"
revision = "a69d19351219b6dd56f274f96d85a7014a2ec34e"
version = "v1.6.0"
[[projects]]
digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = "UT"
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
digest = "1:850c49ca338a10fec2cb9e78f793043ed23965489d09e30bcc19fe29719da313"
name = "github.com/go-sql-driver/mysql"
packages = ["."]
pruneopts = "UT"
revision = "a0583e0143b1624142adab07e0e97fe106d99561"
version = "v1.3"
[[projects]]
digest = "1:9ae31ce33b4bab257668963e844d98765b44160be4ee98cafc44637a213e530d"
name = "github.com/gobwas/glob"
packages = [
".",
"compiler",
"match",
"syntax",
"syntax/ast",
"syntax/lexer",
"util/runes",
"util/strings",
]
pruneopts = "UT"
revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
version = "v0.2.3"
[[projects]]
digest = "1:615643b442214e7a9bade98fa7d50ec072fd17bdc5c955daa194b32e73a532a8"
name = "github.com/gocraft/work"
packages = ["."]
pruneopts = "UT"
revision = "1d4117a214abff263b472043871c8666aedb716b"
version = "v0.5.1"
[[projects]]
digest = "1:4d02824a56d268f74a6b6fdd944b20b58a77c3d70e81008b3ee0c4f1a6777340"
name = "github.com/gogo/protobuf"
packages = [
"proto",
"sortkeys",
]
pruneopts = "UT"
revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
version = "v1.2.1"
[[projects]]
digest = "1:39d9284259004077d3b89109d592fce5f311788745ce94a7ccd4545e536ad3ac"
name = "github.com/golang-migrate/migrate"
packages = [
".",
"database",
"database/postgres",
"source",
"source/file",
]
pruneopts = "UT"
revision = "bcd996f3df28363f43e2d0935484c4559537a3eb"
version = "v3.3.0"
[[projects]]
branch = "master"
digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "UT"
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
digest = "1:41e5cefde26c58f1560df2d1c32c2fa85e332d7cb4460d2077ae8fd8e0f3d789"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes/any",
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
[[projects]]
digest = "1:38ec74012390146c45af1f92d46e5382b50531247929ff3a685d2b2be65155ac"
name = "github.com/gomodule/redigo"
packages = [
"internal",
"redis",
]
pruneopts = "UT"
revision = "9c11da706d9b7902c6da69c592f75637793fe121"
version = "v2.0.0"
[[projects]]
branch = "master"
digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690"
name = "github.com/google/go-querystring"
packages = ["query"]
pruneopts = "UT"
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
branch = "master"
digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "UT"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:160eabf7a69910fd74f29c692718bc2437c1c1c7d4c9dea9712357752a70e5df"
name = "github.com/gorilla/context"
packages = ["."]
pruneopts = "UT"
revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a"
version = "v1.1"
[[projects]]
digest = "1:185a43b59a1f4e7ad4e7ccafb8a1538193d897a2a75be16dda093ec42ad231cf"
name = "github.com/gorilla/handlers"
packages = ["."]
pruneopts = "UT"
revision = "90663712d74cb411cbef281bc1e08c19d1a76145"
version = "v1.3.0"
[[projects]]
digest = "1:3c44722a6360b8d8abf6f70f122c69007189be992a150e39571224c54a9bc380"
name = "github.com/gorilla/mux"
packages = ["."]
pruneopts = "UT"
revision = "7f08801859139f86dfafd1c296e2cba9a80d292e"
version = "v1.6.0"
[[projects]]
digest = "1:f5a2051c55d05548d2d4fd23d244027b59fbd943217df8aa3b5e170ac2fd6e1b"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = "UT"
revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29"
version = "v1.1.6"
[[projects]]
digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
pruneopts = "UT"
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
version = "v1.0.2"
[[projects]]
branch = "master"
digest = "1:bd26bbaf1e9f9dfe829a88f87a0849b56f717c31785443a67668f2c752fa8412"
name = "github.com/lib/pq"
packages = [
".",
"oid",
]
pruneopts = "UT"
revision = "b2004221932bd6b13167ef654c81cffac36f7537"
[[projects]]
digest = "1:5113b1edf6e2f370f9ce6101e7b5a86c3e8decd108067e34b762ae91e42964ee"
name = "github.com/miekg/pkcs11"
packages = ["."]
pruneopts = "UT"
revision = "7283ca79f35edb89bc1b4ecae7f86a3680ce737f"
[[projects]]
digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = "UT"
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = "UT"
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
digest = "1:159d8a990f45d4891f1f04cb6ad7eb18b307cd02d783f7d37fa7a3b93912b172"
name = "github.com/opencontainers/go-digest"
packages = ["."]
pruneopts = "UT"
revision = "aa2ec055abd10d26d539eb630a92241b781ce4bc"
version = "v1.0.0-rc0"
[[projects]]
digest = "1:11db38d694c130c800d0aefb502fb02519e514dc53d9804ce51d1ad25ec27db6"
name = "github.com/opencontainers/image-spec"
packages = [
"specs-go",
"specs-go/v1",
]
pruneopts = "UT"
revision = "d60099175f88c47cd379c4738d158884749ed235"
version = "v1.0.1"
[[projects]]
digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
version = "v0.8.1"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:bd9efe4e0b0f768302a1e2f0c22458149278de533e521206e5ddc71848c269a0"
name = "github.com/pquerna/cachecontrol"
packages = [
".",
"cacheobject",
]
pruneopts = "UT"
revision = "1555304b9b35fdd2b425bccf1a5613677705e7d0"
[[projects]]
digest = "1:3f68283c56d93b885f33c679708079e834815138649e9f59ffbc572c2993e0f8"
name = "github.com/robfig/cron"
packages = ["."]
pruneopts = "UT"
revision = "b024fc5ea0e34bc3f83d9941c8d60b0622bfaca4"
version = "v1"
[[projects]]
digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = "UT"
revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f"
version = "v1.4.1"
[[projects]]
digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
name = "github.com/stretchr/objx"
packages = ["."]
pruneopts = "UT"
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
version = "v0.1.1"
[[projects]]
digest = "1:288e2ba4192b77ec619875ab54d82e2179ca8978e8baa690dcb4343a4a1f4da7"
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock",
"require",
"suite",
]
pruneopts = "UT"
revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
version = "v1.3.0"
[[projects]]
digest = "1:a5702d6fd0891671faf050c05451d3ee4cfd70cb958e11556fefaca628ce832e"
name = "github.com/theupdateframework/notary"
packages = [
".",
"client",
"client/changelist",
"cryptoservice",
"storage",
"trustmanager",
"trustmanager/yubikey",
"trustpinning",
"tuf",
"tuf/data",
"tuf/signed",
"tuf/utils",
"tuf/validation",
]
pruneopts = "UT"
revision = "d6e1431feb32348e0650bf7551ac5cffd01d857b"
version = "v0.6.1"
[[projects]]
digest = "1:ab3259b9f5008a18ff8c1cc34623eccce354f3a9faf5b409983cd6717d64b40b"
name = "golang.org/x/crypto"
packages = [
"cast5",
"ed25519",
"ed25519/internal/edwards25519",
"openpgp",
"openpgp/armor",
"openpgp/clearsign",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"pbkdf2",
"ssh/terminal",
]
pruneopts = "UT"
revision = "5f961cd492ac9d43fc33a8ef646bae79d113fd97"
[[projects]]
digest = "1:2a465dcd21dc1094bd90bc28adc168d5c12d4d754b49d67b34362d26bd5c21b2"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http2",
"http2/hpack",
"lex/httplex",
]
pruneopts = "UT"
revision = "075e191f18186a8ff2becaf64478e30f4545cdad"
[[projects]]
digest = "1:3d57c230f6800023b6fec274f38a139337b5fc0d00169a100a538eb3ef5e3da8"
name = "golang.org/x/oauth2"
packages = [
".",
"clientcredentials",
"internal",
]
pruneopts = "UT"
revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
[[projects]]
branch = "master"
digest = "1:f21f21efdd315b95a015ffd7ddca70ca60ff021848618b5a4efd88bb1603335f"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = "UT"
revision = "571f7bbbe08da2a8955aed9d4db316e78630e9a3"
[[projects]]
branch = "master"
digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "UT"
revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef"
[[projects]]
digest = "1:52133d6859535332391e6193c8878d06347f28881111efa900392802485e9a18"
name = "google.golang.org/appengine"
packages = [
"internal",
"internal/base",
"internal/datastore",
"internal/log",
"internal/remote_api",
"internal/urlfetch",
"urlfetch",
]
pruneopts = "UT"
revision = "24e4144ec923c2374f6b06610c0df16a9222c3d9"
[[projects]]
digest = "1:79decf236a2000df456fe7478fd23da8af950563c922747b299e1fab7fa7d78f"
name = "gopkg.in/asn1-ber.v1"
packages = ["."]
pruneopts = "UT"
revision = "4e86f4367175e39f69d9358a5f17b4dda270378d"
version = "v1.1"
[[projects]]
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = "UT"
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
digest = "1:79691acfc86fc3204928daf67e44955e8021ec5e10091599d344b0e16de32236"
name = "gopkg.in/ldap.v2"
packages = ["."]
pruneopts = "UT"
revision = "8168ee085ee43257585e50c6441aadf54ecb2c9f"
version = "v2.5.0"
[[projects]]
digest = "1:c0c30f47f9c16f227ba82f0bdfd14fa968453c30b7677a07903b3b4f34b98d49"
name = "gopkg.in/square/go-jose.v2"
packages = [
".",
"cipher",
"json",
]
pruneopts = "UT"
revision = "628223f44a71f715d2881ea69afc795a1e9c01be"
version = "v2.3.0"
[[projects]]
digest = "1:2a81c6e126d36ad027328cffaa4888fc3be40f09dc48028d1f93705b718130b9"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
version = "v2.1.1"
[[projects]]
digest = "1:7727a365529cdf6af394821dd990b046c56b8afac31e15e78fed58cf7bc179ad"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"scheduling/v1beta1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1",
]
pruneopts = "UT"
revision = "5cb15d34447165a97c76ed5a60e4e99c8a01ecfe"
version = "kubernetes-1.13.4"
[[projects]]
branch = "master"
digest = "1:d0d43cf61b49d2750351759e1d220134ab7731db608b6716dc4ed792a493027d"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
"pkg/api/resource",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/clock",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/reflect",
]
pruneopts = "UT"
revision = "f534d624797b270e5e46104dc7e2c2d61edbb85d"
[[projects]]
digest = "1:131682c26796b64f0abb77ac3d85525712706fde0b085aaa7b6d10b4398167cc"
name = "k8s.io/client-go"
packages = [
"kubernetes/scheme",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"rest",
"rest/watch",
"tools/clientcmd/api",
"tools/metrics",
"transport",
"util/cert",
"util/connrotation",
"util/flowcontrol",
"util/homedir",
"util/integer",
]
pruneopts = "UT"
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
version = "v8.0.0"
[[projects]]
digest = "1:1076dbb6a69b965ccfda2a06a04e5038db78eff586f74b5daf4a41444e6f6077"
name = "k8s.io/helm"
packages = [
"cmd/helm/search",
"pkg/chartutil",
"pkg/getter",
"pkg/helm/environment",
"pkg/helm/helmpath",
"pkg/ignore",
"pkg/plugin",
"pkg/proto/hapi/chart",
"pkg/proto/hapi/version",
"pkg/provenance",
"pkg/repo",
"pkg/sympath",
"pkg/tlsutil",
"pkg/urlutil",
"pkg/version",
]
pruneopts = "UT"
revision = "20adb27c7c5868466912eebdf6664e7390ebe710"
version = "v2.9.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/Masterminds/semver",
"github.com/astaxie/beego",
"github.com/astaxie/beego/cache",
"github.com/astaxie/beego/cache/redis",
"github.com/astaxie/beego/context",
"github.com/astaxie/beego/orm",
"github.com/astaxie/beego/session",
"github.com/astaxie/beego/session/redis",
"github.com/astaxie/beego/validation",
"github.com/beego/i18n",
"github.com/bmatcuk/doublestar",
"github.com/casbin/casbin",
"github.com/casbin/casbin/model",
"github.com/casbin/casbin/persist",
"github.com/casbin/casbin/util",
"github.com/coreos/go-oidc",
"github.com/dghubble/sling",
"github.com/dgrijalva/jwt-go",
"github.com/docker/distribution",
"github.com/docker/distribution/health",
"github.com/docker/distribution/manifest/manifestlist",
"github.com/docker/distribution/manifest/schema1",
"github.com/docker/distribution/manifest/schema2",
"github.com/docker/distribution/reference",
"github.com/docker/distribution/registry/auth/token",
"github.com/docker/distribution/registry/client/auth/challenge",
"github.com/docker/libtrust",
"github.com/garyburd/redigo/redis",
"github.com/ghodss/yaml",
"github.com/go-sql-driver/mysql",
"github.com/gocraft/work",
"github.com/golang-migrate/migrate",
"github.com/golang-migrate/migrate/database/postgres",
"github.com/golang-migrate/migrate/source/file",
"github.com/gomodule/redigo/redis",
"github.com/gorilla/handlers",
"github.com/gorilla/mux",
"github.com/lib/pq",
"github.com/opencontainers/go-digest",
"github.com/pkg/errors",
"github.com/robfig/cron",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/mock",
"github.com/stretchr/testify/require",
"github.com/stretchr/testify/suite",
"github.com/theupdateframework/notary",
"github.com/theupdateframework/notary/client",
"github.com/theupdateframework/notary/trustpinning",
"github.com/theupdateframework/notary/tuf/data",
"golang.org/x/crypto/pbkdf2",
"golang.org/x/oauth2",
"golang.org/x/oauth2/clientcredentials",
"gopkg.in/ldap.v2",
"gopkg.in/yaml.v2",
"k8s.io/api/authentication/v1beta1",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/runtime/schema",
"k8s.io/apimachinery/pkg/runtime/serializer",
"k8s.io/client-go/kubernetes/scheme",
"k8s.io/client-go/rest",
"k8s.io/helm/cmd/helm/search",
"k8s.io/helm/pkg/chartutil",
"k8s.io/helm/pkg/proto/hapi/chart",
"k8s.io/helm/pkg/repo",
]
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -1,137 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
ignored = ["github.com/goharbor/harbor/tests*"]
[prune]
go-tests = true
unused-packages = true
[[constraint]]
name = "github.com/astaxie/beego"
version = "=1.9.0"
[[constraint]]
name = "github.com/casbin/casbin"
version = "=1.7.0"
[[constraint]]
name = "github.com/dghubble/sling"
version = "=1.1.0"
[[constraint]]
name = "github.com/dgrijalva/jwt-go"
version = "=3.0.0"
[[constraint]]
name = "github.com/docker/distribution"
version = "=2.7.1"
[[constraint]]
branch = "master"
name = "github.com/docker/libtrust"
[[constraint]]
name = "github.com/go-sql-driver/mysql"
version = "=1.3.0"
[[override]]
name = "github.com/mattn/go-sqlite3"
version = "=1.6.0"
[[constraint]]
name = "github.com/opencontainers/go-digest"
version = "=1.0.0-rc0"
[[constraint]]
name = "gopkg.in/ldap.v2"
version = "=2.5.0"
[[constraint]]
name = "github.com/stretchr/testify"
version = "=1.3.0"
[[constraint]]
name = "github.com/gorilla/handlers"
version = "=1.3.0"
[[constraint]]
name = "github.com/gorilla/mux"
version = "=1.6.0"
[[override]]
name = "github.com/Sirupsen/logrus"
version = "=1.0.5"
[[override]]
name = "github.com/gorilla/context"
version = "=1.1"
[[override]]
name = "github.com/garyburd/redigo"
version = "=1.6.0"
[[constraint]]
name = "github.com/golang-migrate/migrate"
version = "=3.3.0"
[[constraint]]
name = "k8s.io/helm"
version = "2.9.1"
[[constraint]]
name = "github.com/ghodss/yaml"
version = "=1.0.0"
[[constraint]]
name = "github.com/Masterminds/semver"
version = "=1.4.2"
[[constraint]]
name = "github.com/gocraft/work"
version = "=0.5.1"
[[constraint]]
name = "github.com/robfig/cron"
version = "=1.0"
[[constraint]]
name = "github.com/coreos/go-oidc"
version = "=2.0.0"
[[constraint]]
name = "gopkg.in/yaml.v2"
version = "=2.1.1"
[[constraint]]
name = "k8s.io/api"
version = "kubernetes-1.13.4"
[[constraint]]
name = "github.com/bmatcuk/doublestar"
version = "=1.1.1"
[[constraint]]
name = "github.com/pkg/errors"
version = "=0.8.1"
[[constraint]]
name = "github.com/docker/notary"
version = "=0.6.1"

View File

@ -1,16 +1,16 @@
package chartserver
import (
"errors"
"fmt"
commonhttp "github.com/goharbor/harbor/src/common/http"
hlog "github.com/goharbor/harbor/src/common/utils/log"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/pkg/errors"
)
const (
@ -49,11 +49,13 @@ func NewChartClient(credential *Credential) *ChartClient { // Create http client
func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
response, err := cc.sendRequest(addr, http.MethodGet, nil)
if err != nil {
err = errors.Wrap(err, "get content failed")
return nil, err
}
content, err := ioutil.ReadAll(response.Body)
if err != nil {
err = errors.Wrap(err, "Read response body error")
return nil, err
}
defer response.Body.Close()
@ -61,6 +63,7 @@ func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
if response.StatusCode != http.StatusOK {
text, err := extractError(content)
if err != nil {
err = errors.Wrap(err, "Extract content error failed")
return nil, err
}
return nil, &commonhttp.Error{
@ -106,7 +109,8 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) (
fullURI, err := url.Parse(addr)
if err != nil {
return nil, fmt.Errorf("invalid url: %s", err.Error())
err = errors.Wrap(err, "Invalid url")
return nil, err
}
request, err := http.NewRequest(method, addr, body)
@ -121,7 +125,7 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) (
response, err := cc.httpClient.Do(request)
if err != nil {
hlog.Errorf("%s '%s' failed with error: %s", method, fullURI.Path, err)
err = errors.Wrap(err, fmt.Sprintf("send request %s %s failed", method, fullURI.Path))
return nil, err
}

View File

@ -7,6 +7,7 @@ import (
"os"
hlog "github.com/goharbor/harbor/src/common/utils/log"
"github.com/justinas/alice"
)
const (
@ -42,7 +43,7 @@ type Controller struct {
}
// NewController is constructor of the chartserver.Controller
func NewController(backendServer *url.URL) (*Controller, error) {
func NewController(backendServer *url.URL, chains ...*alice.Chain) (*Controller, error) {
if backendServer == nil {
return nil, errors.New("failed to create chartserver.Controller: backend sever address is required")
}
@ -68,7 +69,7 @@ func NewController(backendServer *url.URL) (*Controller, error) {
return &Controller{
backendServerAddress: backendServer,
// Use customized reverse proxy
trafficProxy: NewProxyEngine(backendServer, cred),
trafficProxy: NewProxyEngine(backendServer, cred, chains...),
// Initialize chart operator for use
chartOperator: &ChartOperator{},
// Create http client with customized timeouts

View File

@ -2,19 +2,20 @@ package chartserver
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"os"
"strings"
"github.com/ghodss/yaml"
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/replication"
rep_event "github.com/goharbor/harbor/src/replication/event"
"github.com/goharbor/harbor/src/replication/model"
"github.com/pkg/errors"
helm_repo "k8s.io/helm/pkg/repo"
"os"
"github.com/goharbor/harbor/src/common/utils/log"
)
// ListCharts gets the chart list under the namespace
@ -68,11 +69,21 @@ func (c *Controller) DeleteChartVersion(namespace, chartName, version string) er
return errors.New("invalid chart for deleting")
}
url := fmt.Sprintf("%s/%s/%s", c.APIPrefix(namespace), chartName, version)
url := fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", namespace, chartName, version)
req, _ := http.NewRequest(http.MethodDelete, url, nil)
w := httptest.NewRecorder()
err := c.apiClient.DeleteContent(url)
if err != nil {
return err
c.trafficProxy.ServeHTTP(w, req)
if w.Code != http.StatusOK {
text, err := extractError(w.Body.Bytes())
if err != nil {
return err
}
return &commonhttp.Error{
Code: w.Code,
Message: text,
}
}
// send notification to replication handler

View File

@ -15,8 +15,11 @@ import (
"github.com/goharbor/harbor/src/common"
hlog "github.com/goharbor/harbor/src/common/utils/log"
n_event "github.com/goharbor/harbor/src/core/notifier/event"
"github.com/goharbor/harbor/src/replication"
rep_event "github.com/goharbor/harbor/src/replication/event"
"github.com/justinas/alice"
"time"
)
const (
@ -36,20 +39,29 @@ type ProxyEngine struct {
backend *url.URL
// Use go reverse proxy as engine
engine *httputil.ReverseProxy
engine http.Handler
}
// NewProxyEngine is constructor of NewProxyEngine
func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine {
func NewProxyEngine(target *url.URL, cred *Credential, chains ...*alice.Chain) *ProxyEngine {
var engine http.Handler
engine = &httputil.ReverseProxy{
ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile),
Director: func(req *http.Request) {
director(target, cred, req)
},
ModifyResponse: modifyResponse,
}
if len(chains) > 0 {
hlog.Info("New chart server traffic proxy with middlewares")
engine = chains[0].Then(engine)
}
return &ProxyEngine{
backend: target,
engine: &httputil.ReverseProxy{
ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile),
Director: func(req *http.Request) {
director(target, cred, req)
},
ModifyResponse: modifyResponse,
},
engine: engine,
}
}
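
A minimal sketch of how a caller could wire a middleware into the chart proxy through the new variadic parameter. The logging middleware and the chartmuseum URL are assumptions for illustration; only NewController and the alice chain come from this change.

package main

import (
	"net/http"
	"net/url"

	"github.com/goharbor/harbor/src/chartserver"
	hlog "github.com/goharbor/harbor/src/common/utils/log"
	"github.com/justinas/alice"
)

// logging is a hypothetical middleware; any func(http.Handler) http.Handler fits.
func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hlog.Infof("chart proxy: %s %s", r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func buildController() (*chartserver.Controller, error) {
	backend, err := url.Parse("http://chartmuseum:9999") // assumed backend address
	if err != nil {
		return nil, err
	}
	chain := alice.New(logging) // middlewares run before the reverse proxy engine
	return chartserver.NewController(backend, &chain)
}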
@ -100,8 +112,46 @@ func modifyResponse(res *http.Response) error {
hlog.Errorf("failed to handle event: %v", err)
}
}()
}
// Trigger harbor webhook
if e != nil && e.Resource != nil && e.Resource.Metadata != nil && len(e.Resource.Metadata.Vtags) > 0 &&
len(e.Resource.ExtendedInfo) > 0 {
event := &n_event.Event{}
metaData := &n_event.ChartUploadMetaData{
ChartMetaData: n_event.ChartMetaData{
ProjectName: e.Resource.ExtendedInfo["projectName"].(string),
ChartName: e.Resource.ExtendedInfo["chartName"].(string),
Versions: e.Resource.Metadata.Vtags,
OccurAt: time.Now(),
Operator: e.Resource.ExtendedInfo["operator"].(string),
},
}
if err := event.Build(metaData); err == nil {
if err := event.Publish(); err != nil {
hlog.Errorf("failed to publish chart upload event: %v", err)
}
} else {
hlog.Errorf("failed to build chart upload event metadata: %v", err)
}
}
}
}
// Process downloading chart success webhook event
if res.StatusCode == http.StatusOK {
chartDownloadEvent := res.Request.Context().Value(common.ChartDownloadCtxKey)
eventMetaData, ok := chartDownloadEvent.(*n_event.ChartDownloadMetaData)
if ok && eventMetaData != nil {
// Trigger harbor webhook
event := &n_event.Event{}
if err := event.Build(eventMetaData); err == nil {
if err := event.Publish(); err != nil {
hlog.Errorf("failed to publish chart download event: %v", err)
}
} else {
hlog.Errorf("failed to build chart download event metadata: %v", err)
}
}
}
// Accept cases

View File

@ -20,12 +20,11 @@ import (
"net/http"
"strconv"
"github.com/astaxie/beego"
"github.com/astaxie/beego/validation"
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils/log"
"errors"
"github.com/astaxie/beego"
"github.com/pkg/errors"
)
const (

View File

@ -210,12 +210,14 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database {
return &models.Database{
Type: c.Get(common.DatabaseType).GetString(),
PostGreSQL: &models.PostGreSQL{
Host: c.Get(common.PostGreSQLHOST).GetString(),
Port: c.Get(common.PostGreSQLPort).GetInt(),
Username: c.Get(common.PostGreSQLUsername).GetString(),
Password: c.Get(common.PostGreSQLPassword).GetString(),
Database: c.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
Host: c.Get(common.PostGreSQLHOST).GetString(),
Port: c.Get(common.PostGreSQLPort).GetInt(),
Username: c.Get(common.PostGreSQLUsername).GetString(),
Password: c.Get(common.PostGreSQLPassword).GetString(),
Database: c.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
MaxIdleConns: c.Get(common.PostGreSQLMaxIdleConns).GetInt(),
MaxOpenConns: c.Get(common.PostGreSQLMaxOpenConns).GetInt(),
},
}
}

View File

@ -47,6 +47,7 @@ const (
HTTPAuthGroup = "http_auth"
OIDCGroup = "oidc"
DatabaseGroup = "database"
QuotaGroup = "quota"
// Put all config items that do not belong to an existing group into basic
BasicGroup = "basic"
ClairGroup = "clair"
@ -74,6 +75,7 @@ var (
{Name: common.ClairURL, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_URL", DefaultValue: "http://clair:6060", ItemType: &StringType{}, Editable: false},
{Name: common.CoreURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_URL", DefaultValue: "http://core:8080", ItemType: &StringType{}, Editable: false},
{Name: common.CoreLocalURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_LOCAL_URL", DefaultValue: "http://127.0.0.1:8080", ItemType: &StringType{}, Editable: false},
{Name: common.DatabaseType, Scope: SystemScope, Group: BasicGroup, EnvKey: "DATABASE_TYPE", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: common.EmailFrom, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_FROM", DefaultValue: "admin <sample_admin@mydomain.com>", ItemType: &StringType{}, Editable: false},
@ -91,7 +93,7 @@ var (
{Name: common.LDAPBaseDN, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_BASE_DN", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: common.LDAPFilter, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupBaseDN, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_BASE_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LdapGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupAttributeName, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_GID", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupSearchFilter, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupSearchScope, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false},
@ -114,6 +116,8 @@ var (
{Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false},
{Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false},
{Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false},
{Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
@ -133,7 +137,7 @@ var (
{Name: common.HTTPAuthProxyEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
{Name: common.HTTPAuthProxyTokenReviewEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
{Name: common.HTTPAuthProxyVerifyCert, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "true", ItemType: &BoolType{}},
{Name: common.HTTPAuthProxyAlwaysOnboard, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}},
{Name: common.HTTPAuthProxySkipSearch, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}},
{Name: common.OIDCName, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
{Name: common.OIDCEndpoint, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
@ -147,5 +151,10 @@ var (
{Name: common.WithNotary, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
// the unit of expiration is minutes; 43200 minutes = 30 days
{Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
{Name: common.NotificationEnable, Scope: UserScope, Group: BasicGroup, EnvKey: "NOTIFICATION_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
{Name: common.QuotaPerProjectEnable, Scope: UserScope, Group: QuotaGroup, EnvKey: "QUOTA_PER_PROJECT_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
{Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
{Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
}
)

View File

@ -18,9 +18,10 @@ package metadata
import (
"encoding/json"
"fmt"
"github.com/goharbor/harbor/src/common"
"strconv"
"strings"
"github.com/goharbor/harbor/src/common"
)
// Type - Use this interface to define and encapsulate the behavior of validation and transformation
@ -186,3 +187,21 @@ func (t *MapType) get(str string) (interface{}, error) {
err := json.Unmarshal([]byte(str), &result)
return result, err
}
// QuotaType ...
type QuotaType struct {
Int64Type
}
func (t *QuotaType) validate(str string) error {
val, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return err
}
if val <= 0 && val != -1 {
return fmt.Errorf("quota value should be -1 or great than zero")
}
return nil
}
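
A quick table-driven sketch of the validation rule above; validate is unexported, so this would live in a test within the same package.

func TestQuotaTypeValidate(t *testing.T) {
	cases := []struct {
		in string
		ok bool
	}{
		{"-1", true},  // -1 means unlimited
		{"10", true},  // any positive value is accepted
		{"0", false},  // zero is rejected
		{"-5", false}, // other negative values are rejected
	}
	for _, c := range cases {
		err := (&QuotaType{}).validate(c.in)
		if (err == nil) != c.ok {
			t.Errorf("validate(%q) = %v, want ok=%v", c.in, err, c.ok)
		}
	}
}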

View File

@ -40,7 +40,7 @@ func (d *Database) Load() (map[string]interface{}, error) {
itemMetadata, ok := metadata.Instance().GetByName(item.Key)
if !ok {
log.Warningf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err)
log.Debugf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err)
continue
}
if itemMetadata.Scope == metadata.SystemScope {

24
src/common/const.go Normal file → Executable file
View File

@ -53,8 +53,11 @@ const (
PostGreSQLPassword = "postgresql_password"
PostGreSQLDatabase = "postgresql_database"
PostGreSQLSSLMode = "postgresql_sslmode"
PostGreSQLMaxIdleConns = "postgresql_max_idle_conns"
PostGreSQLMaxOpenConns = "postgresql_max_open_conns"
SelfRegistration = "self_registration"
CoreURL = "core_url"
CoreLocalURL = "core_local_url"
JobServiceURL = "jobservice_url"
LDAPURL = "ldap_url"
LDAPSearchDN = "ldap_search_dn"
@ -100,7 +103,7 @@ const (
HTTPAuthProxyEndpoint = "http_authproxy_endpoint"
HTTPAuthProxyTokenReviewEndpoint = "http_authproxy_tokenreview_endpoint"
HTTPAuthProxyVerifyCert = "http_authproxy_verify_cert"
HTTPAuthProxyAlwaysOnboard = "http_authproxy_always_onboard"
HTTPAuthProxySkipSearch = "http_authproxy_skip_search"
OIDCName = "oidc_name"
OIDCEndpoint = "oidc_endpoint"
OIDCCLientID = "oidc_client_id"
@ -120,8 +123,9 @@ const (
NotaryURL = "notary_url"
DefaultCoreEndpoint = "http://core:8080"
DefaultNotaryEndpoint = "http://notary-server:4443"
LdapGroupType = 1
LdapGroupAdminDn = "ldap_group_admin_dn"
LDAPGroupType = 1
HTTPGroupType = 2
LDAPGroupAdminDn = "ldap_group_admin_dn"
LDAPGroupMembershipAttribute = "ldap_group_membership_attribute"
DefaultRegistryControllerEndpoint = "http://registryctl:8080"
WithChartMuseum = "with_chartmuseum"
@ -140,5 +144,17 @@ const (
OIDCCallbackPath = "/c/oidc/callback"
OIDCLoginPath = "/c/oidc/login"
ChartUploadCtxKey = contextKey("chart_upload_event")
ChartUploadCtxKey = contextKey("chart_upload_event")
ChartDownloadCtxKey = contextKey("chart_download_event")
// Global notification enable configuration
NotificationEnable = "notification_enable"
// Quota setting items for project
QuotaPerProjectEnable = "quota_per_project_enable"
CountPerProject = "count_per_project"
StoragePerProject = "storage_per_project"
// ForeignLayer
ForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)

142
src/common/dao/artifact.go Normal file
View File

@ -0,0 +1,142 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"strings"
"time"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
)
// AddArtifact ...
func AddArtifact(af *models.Artifact) (int64, error) {
now := time.Now()
af.CreationTime = now
af.PushTime = now
id, err := GetOrmer().Insert(af)
if err != nil {
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
return 0, ErrDupRows
}
return 0, err
}
return id, nil
}
// UpdateArtifact ...
func UpdateArtifact(af *models.Artifact) error {
_, err := GetOrmer().Update(af)
return err
}
// UpdateArtifactDigest ...
func UpdateArtifactDigest(af *models.Artifact) error {
_, err := GetOrmer().Update(af, "digest")
return err
}
// UpdateArtifactPullTime updates the pull time of the artifact.
func UpdateArtifactPullTime(af *models.Artifact) error {
_, err := GetOrmer().Update(af, "pull_time")
return err
}
// DeleteArtifact ...
func DeleteArtifact(id int64) error {
_, err := GetOrmer().QueryTable(&models.Artifact{}).Filter("ID", id).Delete()
return err
}
// DeleteArtifactByDigest ...
func DeleteArtifactByDigest(projectID int64, repo, digest string) error {
_, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and digest = ? `,
projectID, repo, digest).Exec()
if err != nil {
return err
}
return nil
}
// DeleteArtifactByTag ...
func DeleteArtifactByTag(projectID int64, repo, tag string) error {
_, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and tag = ? `,
projectID, repo, tag).Exec()
if err != nil {
return err
}
return nil
}
// ListArtifacts lists artifacts according to the query conditions
func ListArtifacts(query *models.ArtifactQuery) ([]*models.Artifact, error) {
qs := getArtifactQuerySetter(query)
if query.Size > 0 {
qs = qs.Limit(query.Size)
if query.Page > 0 {
qs = qs.Offset((query.Page - 1) * query.Size)
}
}
afs := []*models.Artifact{}
_, err := qs.All(&afs)
return afs, err
}
// GetArtifact by repository and tag
func GetArtifact(repo, tag string) (*models.Artifact, error) {
artifact := &models.Artifact{}
err := GetOrmer().QueryTable(&models.Artifact{}).
Filter("Repo", repo).
Filter("Tag", tag).One(artifact)
if err != nil {
if err == orm.ErrNoRows {
return nil, nil
}
return nil, err
}
return artifact, nil
}
// GetTotalOfArtifacts returns total of artifacts
func GetTotalOfArtifacts(query ...*models.ArtifactQuery) (int64, error) {
var qs orm.QuerySeter
if len(query) > 0 {
qs = getArtifactQuerySetter(query[0])
} else {
qs = GetOrmer().QueryTable(&models.Artifact{})
}
return qs.Count()
}
func getArtifactQuerySetter(query *models.ArtifactQuery) orm.QuerySeter {
qs := GetOrmer().QueryTable(&models.Artifact{})
if query.PID != 0 {
qs = qs.Filter("PID", query.PID)
}
if len(query.Repo) > 0 {
qs = qs.Filter("Repo", query.Repo)
}
if len(query.Tag) > 0 {
qs = qs.Filter("Tag", query.Tag)
}
if len(query.Digest) > 0 {
qs = qs.Filter("Digest", query.Digest)
}
return qs
}

View File

@ -0,0 +1,110 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"fmt"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/pkg/errors"
"strconv"
"strings"
"time"
)
// AddArtifactNBlob ...
func AddArtifactNBlob(afnb *models.ArtifactAndBlob) (int64, error) {
now := time.Now()
afnb.CreationTime = now
id, err := GetOrmer().Insert(afnb)
if err != nil {
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
return 0, ErrDupRows
}
return 0, err
}
return id, nil
}
// AddArtifactNBlobs ...
func AddArtifactNBlobs(afnbs []*models.ArtifactAndBlob) error {
o := orm.NewOrm()
err := o.Begin()
if err != nil {
return err
}
var errInsertMultiple error
total := len(afnbs)
successNums, err := o.InsertMulti(total, afnbs)
if err != nil {
errInsertMultiple = err
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
errInsertMultiple = errors.Wrap(errInsertMultiple, ErrDupRows.Error())
}
err := o.Rollback()
if err != nil {
log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err)
errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error())
}
return errInsertMultiple
}
// Roll back if only part of the rows were inserted successfully.
if successNums != int64(total) {
errInsertMultiple = errors.New("not all artifacts and blobs were inserted successfully")
err := o.Rollback()
if err != nil {
log.Errorf("failed to roll back the insert of multiple artifacts and blobs, %v", err)
errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error())
}
return errInsertMultiple
}
err = o.Commit()
if err != nil {
log.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
return fmt.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
}
return nil
}
// DeleteArtifactAndBlobByDigest ...
func DeleteArtifactAndBlobByDigest(digest string) error {
_, err := GetOrmer().Raw(`delete from artifact_blob where digest_af = ? `, digest).Exec()
if err != nil {
return err
}
return nil
}
// CountSizeOfArtifact ...
func CountSizeOfArtifact(digest string) (int64, error) {
var res []orm.Params
num, err := GetOrmer().Raw(`SELECT sum(bb.size) FROM artifact_blob afnb LEFT JOIN blob bb ON afnb.digest_blob = bb.digest WHERE afnb.digest_af = ? `, digest).Values(&res)
if err != nil {
return -1, err
}
if num > 0 {
size, err := strconv.ParseInt(res[0]["sum"].(string), 0, 64)
if err != nil {
return -1, err
}
return size, nil
}
return -1, err
}

View File

@ -0,0 +1,131 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAddArtifactNBlob(t *testing.T) {
afnb := &models.ArtifactAndBlob{
DigestAF: "vvvv",
DigestBlob: "aaaa",
}
// add
id, err := AddArtifactNBlob(afnb)
require.Nil(t, err)
afnb.ID = id
assert.Equal(t, id, int64(1))
}
func TestAddArtifactNBlobs(t *testing.T) {
afnb1 := &models.ArtifactAndBlob{
DigestAF: "zzzz",
DigestBlob: "zzza",
}
afnb2 := &models.ArtifactAndBlob{
DigestAF: "zzzz",
DigestBlob: "zzzb",
}
afnb3 := &models.ArtifactAndBlob{
DigestAF: "zzzz",
DigestBlob: "zzzc",
}
var afnbs []*models.ArtifactAndBlob
afnbs = append(afnbs, afnb1)
afnbs = append(afnbs, afnb2)
afnbs = append(afnbs, afnb3)
// add
err := AddArtifactNBlobs(afnbs)
require.Nil(t, err)
}
func TestDeleteArtifactAndBlobByDigest(t *testing.T) {
afnb := &models.ArtifactAndBlob{
DigestAF: "vvvv",
DigestBlob: "vvva",
}
// add
_, err := AddArtifactNBlob(afnb)
require.Nil(t, err)
// delete
err = DeleteArtifactAndBlobByDigest(afnb.DigestAF)
require.Nil(t, err)
}
func TestCountSizeOfArtifact(t *testing.T) {
afnb1 := &models.ArtifactAndBlob{
DigestAF: "xxxx",
DigestBlob: "aaaa",
}
afnb2 := &models.ArtifactAndBlob{
DigestAF: "xxxx",
DigestBlob: "aaab",
}
afnb3 := &models.ArtifactAndBlob{
DigestAF: "xxxx",
DigestBlob: "aaac",
}
var afnbs []*models.ArtifactAndBlob
afnbs = append(afnbs, afnb1)
afnbs = append(afnbs, afnb2)
afnbs = append(afnbs, afnb3)
err := AddArtifactNBlobs(afnbs)
require.Nil(t, err)
blob1 := &models.Blob{
Digest: "aaaa",
ContentType: "v2.blob",
Size: 100,
}
_, err = AddBlob(blob1)
require.Nil(t, err)
blob2 := &models.Blob{
Digest: "aaab",
ContentType: "v2.blob",
Size: 200,
}
_, err = AddBlob(blob2)
require.Nil(t, err)
blob3 := &models.Blob{
Digest: "aaac",
ContentType: "v2.blob",
Size: 300,
}
_, err = AddBlob(blob3)
require.Nil(t, err)
imageSize, err := CountSizeOfArtifact("xxxx")
require.Nil(t, err)
require.Equal(t, imageSize, int64(600))
}

View File

@ -0,0 +1,184 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"time"
"github.com/goharbor/harbor/src/common/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAddArtifact(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "hello-world",
Tag: "latest",
Digest: "1234abcd",
Kind: "image",
}
// add
id, err := AddArtifact(af)
require.Nil(t, err)
af.ID = id
assert.Equal(t, id, int64(1))
}
func TestGetArtifact(t *testing.T) {
repo := "hello-world"
tag := "latest"
artifact, err := GetArtifact(repo, tag)
require.Nil(t, err)
require.NotNil(t, artifact)
assert.Equal(t, repo, artifact.Repo)
assert.Equal(t, tag, artifact.Tag)
}
func TestUpdateArtifactDigest(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "hello-world",
Tag: "v2.0",
Digest: "4321abcd",
Kind: "image",
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
af.Digest = "update_4321abcd"
require.Nil(t, UpdateArtifactDigest(af))
assert.Equal(t, af.Digest, "update_4321abcd")
}
func TestUpdateArtifactPullTime(t *testing.T) {
timeNow := time.Now()
af := &models.Artifact{
PID: 1,
Repo: "TestUpdateArtifactPullTime",
Tag: "v1.0",
Digest: "4321abcd",
Kind: "image",
PullTime: timeNow,
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
time.Sleep(time.Second * 1)
af.PullTime = time.Now()
require.Nil(t, UpdateArtifactPullTime(af))
assert.NotEqual(t, timeNow, af.PullTime)
}
func TestDeleteArtifact(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "hello-world",
Tag: "v1.0",
Digest: "1234abcd",
Kind: "image",
}
// add
id, err := AddArtifact(af)
require.Nil(t, err)
// delete
err = DeleteArtifact(id)
require.Nil(t, err)
}
func TestDeleteArtifactByDigest(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "hello-world",
Tag: "v1.1",
Digest: "TestDeleteArtifactByDigest",
Kind: "image",
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
// delete
err = DeleteArtifactByDigest(af.PID, af.Repo, af.Digest)
require.Nil(t, err)
}
func TestDeleteArtifactByTag(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "hello-world",
Tag: "v1.2",
Digest: "TestDeleteArtifactByTag",
Kind: "image",
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
// delete
err = DeleteArtifactByTag(1, "hello-world", "v1.2")
require.Nil(t, err)
}
func TestListArtifacts(t *testing.T) {
af := &models.Artifact{
PID: 1,
Repo: "hello-world",
Tag: "v3.0",
Digest: "TestListArtifacts",
Kind: "image",
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
afs, err := ListArtifacts(&models.ArtifactQuery{
PID: 1,
Repo: "hello-world",
Tag: "v3.0",
})
require.Nil(t, err)
assert.Equal(t, 1, len(afs))
}
func TestGetTotalOfArtifacts(t *testing.T) {
af := &models.Artifact{
PID: 2,
Repo: "hello-world",
Tag: "v3.0",
Digest: "TestGetTotalOfArtifacts",
Kind: "image",
}
// add
_, err := AddArtifact(af)
require.Nil(t, err)
total, err := GetTotalOfArtifacts(&models.ArtifactQuery{
PID: 2,
Repo: "hello-world",
Tag: "v3.0",
})
require.Nil(t, err)
assert.Equal(t, int64(1), total)
}

View File

@ -121,12 +121,16 @@ func getDatabase(database *models.Database) (db Database, err error) {
switch database.Type {
case "", "postgresql":
db = NewPGSQL(database.PostGreSQL.Host,
db = NewPGSQL(
database.PostGreSQL.Host,
strconv.Itoa(database.PostGreSQL.Port),
database.PostGreSQL.Username,
database.PostGreSQL.Password,
database.PostGreSQL.Database,
database.PostGreSQL.SSLMode)
database.PostGreSQL.SSLMode,
database.PostGreSQL.MaxIdleConns,
database.PostGreSQL.MaxOpenConns,
)
default:
err = fmt.Errorf("invalid database: %s", database.Type)
}
@ -139,6 +143,8 @@ var once sync.Once
// GetOrmer :set ormer singleton
func GetOrmer() orm.Ormer {
once.Do(func() {
// override the default value (1000) so that all records are returned when no limit is set
orm.DefaultRowsLimit = -1
globalOrm = orm.NewOrm()
})
return globalOrm
@ -167,11 +173,13 @@ func ClearTable(table string) error {
return err
}
func paginateForRawSQL(sql string, limit, offset int64) string {
// PaginateForRawSQL ...
func PaginateForRawSQL(sql string, limit, offset int64) string {
return fmt.Sprintf("%s limit %d offset %d", sql, limit, offset)
}
func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter {
// PaginateForQuerySetter ...
func PaginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter {
if size > 0 {
qs = qs.Limit(size)
if page > 0 {
@ -183,7 +191,34 @@ func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter
// Escape ..
func Escape(str string) string {
str = strings.Replace(str, `\`, `\\`, -1)
str = strings.Replace(str, `%`, `\%`, -1)
str = strings.Replace(str, `_`, `\_`, -1)
return str
}
// WithTransaction is a helper that runs the handler inside a database transaction
func WithTransaction(handler func(o orm.Ormer) error) error {
o := orm.NewOrm()
if err := o.Begin(); err != nil {
log.Errorf("begin transaction failed: %v", err)
return err
}
if err := handler(o); err != nil {
if e := o.Rollback(); e != nil {
log.Errorf("rollback transaction failed: %v", e)
return e
}
return err
}
if err := o.Commit(); err != nil {
log.Errorf("commit transaction failed: %v", err)
return err
}
return nil
}
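
A minimal usage sketch for the helper above, written as if it lived in package dao; pairing an artifact insert with a blob insert is illustrative, not part of this change.

func addArtifactWithBlob(af *models.Artifact, blob *models.Blob) error {
	return WithTransaction(func(o orm.Ormer) error {
		if _, err := o.Insert(af); err != nil {
			return err // any returned error rolls the whole transaction back
		}
		if _, err := o.Insert(blob); err != nil {
			return err
		}
		return nil // returning nil commits
	})
}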

232
src/common/dao/blob.go Normal file
View File

@ -0,0 +1,232 @@
package dao
import (
"fmt"
"strings"
"time"
"github.com/docker/distribution"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
)
// AddBlob ...
func AddBlob(blob *models.Blob) (int64, error) {
now := time.Now()
blob.CreationTime = now
id, err := GetOrmer().Insert(blob)
if err != nil {
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
return 0, ErrDupRows
}
return 0, err
}
return id, nil
}
// GetOrCreateBlob returns the blob by digest, creating it if it does not exist
func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) {
blob.CreationTime = time.Now()
created, id, err := GetOrmer().ReadOrCreate(blob, "digest")
if err != nil {
return false, nil, err
}
blob.ID = id
return created, blob, nil
}
// GetBlob ...
func GetBlob(digest string) (*models.Blob, error) {
o := GetOrmer()
qs := o.QueryTable(&models.Blob{})
qs = qs.Filter("Digest", digest)
b := []*models.Blob{}
_, err := qs.All(&b)
if err != nil {
return nil, fmt.Errorf("failed to get blob for digest %s, error: %v", digest, err)
}
if len(b) == 0 {
log.Infof("No blob found for digest %s, returning empty.", digest)
return &models.Blob{}, nil
} else if len(b) > 1 {
log.Infof("Multiple blob found for digest %s", digest)
return &models.Blob{}, fmt.Errorf("Multiple blob found for digest %s", digest)
}
return b[0], nil
}
// DeleteBlob ...
func DeleteBlob(digest string) error {
o := GetOrmer()
_, err := o.QueryTable("blob").Filter("digest", digest).Delete()
return err
}
// ListBlobs lists blobs according to the query conditions
func ListBlobs(query *models.BlobQuery) ([]*models.Blob, error) {
qs := GetOrmer().QueryTable(&models.Blob{})
if query != nil {
if query.Digest != "" {
qs = qs.Filter("Digest", query.Digest)
}
if query.ContentType != "" {
qs = qs.Filter("ContentType", query.ContentType)
}
if len(query.Digests) > 0 {
qs = qs.Filter("Digest__in", query.Digests)
}
if query.Size > 0 {
qs = qs.Limit(query.Size)
if query.Page > 0 {
qs = qs.Offset((query.Page - 1) * query.Size)
}
}
}
blobs := []*models.Blob{}
_, err := qs.All(&blobs)
return blobs, err
}
// SyncBlobs syncs references to blobs
func SyncBlobs(references []distribution.Descriptor) error {
if len(references) == 0 {
return nil
}
var digests []string
for _, reference := range references {
digests = append(digests, reference.Digest.String())
}
existing, err := ListBlobs(&models.BlobQuery{Digests: digests})
if err != nil {
return err
}
mp := make(map[string]*models.Blob, len(existing))
for _, blob := range existing {
mp[blob.Digest] = blob
}
var missing, updating []*models.Blob
for _, reference := range references {
if blob, found := mp[reference.Digest.String()]; found {
if blob.ContentType != reference.MediaType {
blob.ContentType = reference.MediaType
updating = append(updating, blob)
}
} else {
missing = append(missing, &models.Blob{
Digest: reference.Digest.String(),
ContentType: reference.MediaType,
Size: reference.Size,
CreationTime: time.Now(),
})
}
}
o := GetOrmer()
if len(updating) > 0 {
for _, blob := range updating {
if _, err := o.Update(blob, "content_type"); err != nil {
log.Warningf("Failed to update blob %s, error: %v", blob.Digest, err)
}
}
}
if len(missing) > 0 {
_, err = o.InsertMulti(10, missing)
if err != nil {
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
return ErrDupRows
}
}
return err
}
return nil
}
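In practice the descriptors would come from a parsed manifest; a sketch under that assumption (the helper name syncManifestBlobs is hypothetical):

package dao // sketch only

import (
	"github.com/docker/distribution/manifest/schema2"
)

// syncManifestBlobs is a hypothetical helper: it parses a schema2
// manifest payload and records its config and layer blobs.
func syncManifestBlobs(manifestBytes []byte) error {
	manifest := &schema2.DeserializedManifest{}
	if err := manifest.UnmarshalJSON(manifestBytes); err != nil {
		return err
	}
	// References() yields the config descriptor followed by the layers.
	return SyncBlobs(manifest.References())
}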
// GetBlobsByArtifact returns blobs of artifact
func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) {
sql := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)`
var blobs []*models.Blob
if _, err := GetOrmer().Raw(sql, artifactDigest).QueryRows(&blobs); err != nil {
return nil, err
}
return blobs, nil
}
// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project
func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) {
var exclusive []*models.Blob
blobs, err := GetBlobsByArtifact(digest)
if err != nil {
return nil, err
}
if len(blobs) == 0 {
return exclusive, nil
}
sql := fmt.Sprintf(`
SELECT
DISTINCT b.digest_blob AS digest
FROM
(
SELECT
digest
FROM
artifact
WHERE
(
project_id = ?
AND repo != ?
)
OR (
project_id = ?
AND digest != ?
)
) AS a
LEFT JOIN artifact_blob b ON a.digest = b.digest_af
AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)))
params := []interface{}{projectID, repository, projectID, digest}
for _, blob := range blobs {
params = append(params, blob.Digest)
}
var rows []struct {
Digest string
}
if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil {
return nil, err
}
shared := map[string]bool{}
for _, row := range rows {
shared[row.Digest] = true
}
for _, blob := range blobs {
if !shared[blob.Digest] {
exclusive = append(exclusive, blob)
}
}
return exclusive, nil
}

306
src/common/dao/blob_test.go Normal file
View File

@ -0,0 +1,306 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"strings"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
func TestAddBlob(t *testing.T) {
blob := &models.Blob{
Digest: "1234abcd",
ContentType: "v2.blob",
Size: 1523,
}
// add
_, err := AddBlob(blob)
require.Nil(t, err)
}
func TestGetBlob(t *testing.T) {
blob := &models.Blob{
Digest: "12345abcde",
ContentType: "v2.blob",
Size: 453,
}
// add
id, err := AddBlob(blob)
require.Nil(t, err)
blob.ID = id
blob2, err := GetBlob("12345abcde")
require.Nil(t, err)
assert.Equal(t, blob.Digest, blob2.Digest)
}
func TestDeleteBlob(t *testing.T) {
blob := &models.Blob{
Digest: "123456abcdef",
ContentType: "v2.blob",
Size: 4543,
}
id, err := AddBlob(blob)
require.Nil(t, err)
blob.ID = id
err = DeleteBlob(blob.Digest)
require.Nil(t, err)
}
func TestListBlobs(t *testing.T) {
assert := assert.New(t)
d1 := digest.FromString(utils.GenerateRandomString())
d2 := digest.FromString(utils.GenerateRandomString())
d3 := digest.FromString(utils.GenerateRandomString())
d4 := digest.FromString(utils.GenerateRandomString())
for _, e := range []struct {
Digest digest.Digest
ContentType string
Size int64
}{
{d1, schema2.MediaTypeLayer, 1},
{d2, schema2.MediaTypeLayer, 2},
{d3, schema2.MediaTypeForeignLayer, 3},
{d4, schema2.MediaTypeForeignLayer, 4},
} {
blob := &models.Blob{
Digest: e.Digest.String(),
ContentType: e.ContentType,
Size: e.Size,
}
_, err := AddBlob(blob)
assert.Nil(err)
}
defer func() {
for _, d := range []digest.Digest{d1, d2, d3, d4} {
DeleteBlob(d.String())
}
}()
blobs, err := ListBlobs(&models.BlobQuery{Digest: d1.String()})
assert.Nil(err)
assert.Len(blobs, 1)
blobs, err = ListBlobs(&models.BlobQuery{ContentType: schema2.MediaTypeForeignLayer})
assert.Nil(err)
assert.Len(blobs, 2)
blobs, err = ListBlobs(&models.BlobQuery{Digests: []string{d1.String(), d2.String(), d3.String()}})
assert.Nil(err)
assert.Len(blobs, 3)
}
func TestSyncBlobs(t *testing.T) {
assert := assert.New(t)
d1 := digest.FromString(utils.GenerateRandomString())
d2 := digest.FromString(utils.GenerateRandomString())
d3 := digest.FromString(utils.GenerateRandomString())
d4 := digest.FromString(utils.GenerateRandomString())
blob := &models.Blob{
Digest: d1.String(),
ContentType: schema2.MediaTypeLayer,
Size: 1,
}
_, err := AddBlob(blob)
assert.Nil(err)
assert.Nil(SyncBlobs([]distribution.Descriptor{}))
references := []distribution.Descriptor{
{MediaType: schema2.MediaTypeLayer, Digest: d1, Size: 1},
{MediaType: schema2.MediaTypeForeignLayer, Digest: d2, Size: 2},
{MediaType: schema2.MediaTypeForeignLayer, Digest: d3, Size: 3},
{MediaType: schema2.MediaTypeForeignLayer, Digest: d4, Size: 4},
}
assert.Nil(SyncBlobs(references))
defer func() {
for _, d := range []digest.Digest{d1, d2, d3, d4} {
DeleteBlob(d.String())
}
}()
blobs, err := ListBlobs(&models.BlobQuery{Digests: []string{d1.String(), d2.String(), d3.String(), d4.String()}})
assert.Nil(err)
assert.Len(blobs, 4)
}
func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) {
digest := digest.FromString(strings.Join(layerDigests, ":")).String()
artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag}
if _, err := AddArtifact(artifact); err != nil {
return "", err
}
var afnbs []*models.ArtifactAndBlob
blobDigests := append([]string{digest}, layerDigests...)
for _, blobDigest := range blobDigests {
blob := &models.Blob{Digest: blobDigest, Size: 1}
if _, _, err := GetOrCreateBlob(blob); err != nil {
return "", err
}
afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest})
}
total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest})
if err != nil {
return "", err
}
if total == 1 {
if err := AddArtifactNBlobs(afnbs); err != nil {
return "", err
}
}
return digest, nil
}
func withProject(f func(int64, string)) {
projectName := utils.GenerateRandomString()
projectID, err := AddProject(models.Project{
Name: projectName,
OwnerID: 1,
})
if err != nil {
panic(err)
}
defer func() {
DeleteProject(projectID)
}()
f(projectID, projectName)
}
type GetExclusiveBlobsSuite struct {
suite.Suite
}
func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string {
digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...)
suite.Nil(err)
return digest
}
func (suite *GetExclusiveBlobsSuite) TestInSameRepository() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
digest3 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 3)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 3)
}
manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(manifest1, blobs[0].Digest)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 1)
suite.Equal(manifest2, blobs[0].Digest)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 2)
}
})
}
func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
digest3 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 3)
}
manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 0)
}
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
suite.Len(blobs, 2)
}
})
}
func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() {
withProject(func(projectID int64, projectName string) {
digest1 := digest.FromString(utils.GenerateRandomString()).String()
digest2 := digest.FromString(utils.GenerateRandomString()).String()
manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 3)
}
withProject(func(id int64, name string) {
manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2)
if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
suite.Len(blobs, 3)
}
if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) {
suite.Len(blobs, 3)
}
})
})
}
func TestRunGetExclusiveBlobsSuite(t *testing.T) {
suite.Run(t, new(GetExclusiveBlobsSuite))
}

View File

@ -54,7 +54,7 @@ func GetConfigEntries() ([]*models.ConfigEntry, error) {
func SaveConfigEntries(entries []models.ConfigEntry) error {
o := GetOrmer()
for _, entry := range entries {
if entry.Key == common.LdapGroupAdminDn {
if entry.Key == common.LDAPGroupAdminDn {
entry.Value = utils.TrimLower(entry.Value)
}
tempEntry := models.ConfigEntry{}

View File

@ -0,0 +1,64 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"encoding/json"
"fmt"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
)
// CreateCVEWhitelist creates the CVE whitelist
func CreateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
o := GetOrmer()
itemsBytes, _ := json.Marshal(l.Items)
l.ItemsText = string(itemsBytes)
return o.Insert(&l)
}
// UpdateCVEWhitelist updates the CVE whitelist in the DB
func UpdateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
o := GetOrmer()
itemsBytes, _ := json.Marshal(l.Items)
l.ItemsText = string(itemsBytes)
id, err := o.InsertOrUpdate(&l, "project_id")
return id, err
}
// GetCVEWhitelist gets the CVE whitelist of the project with the given project ID
func GetCVEWhitelist(pid int64) (*models.CVEWhitelist, error) {
o := GetOrmer()
qs := o.QueryTable(&models.CVEWhitelist{})
qs = qs.Filter("ProjectID", pid)
r := []*models.CVEWhitelist{}
_, err := qs.All(&r)
if err != nil {
return nil, fmt.Errorf("failed to get CVE whitelist for project %d, error: %v", pid, err)
}
if len(r) == 0 {
return nil, nil
} else if len(r) > 1 {
log.Infof("Multiple CVE whitelists found for project %d, length: %d, returning first element.", pid, len(r))
}
items := []models.CVEWhitelistItem{}
err = json.Unmarshal([]byte(r[0].ItemsText), &items)
if err != nil {
log.Errorf("Failed to decode item list, err: %v, text: %s", err, r[0].ItemsText)
return nil, err
}
r[0].Items = items
return r[0], nil
}
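A round-trip sketch of how Items is serialized into ItemsText on write and decoded back on read (the project ID and CVE ID are illustrative):

package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/dao"
	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	wl := models.CVEWhitelist{
		ProjectID: 42, // illustrative
		Items:     []models.CVEWhitelistItem{{CVEID: "CVE-2019-0001"}},
	}
	// UpdateCVEWhitelist marshals Items into ItemsText and upserts by project_id.
	if _, err := dao.UpdateCVEWhitelist(wl); err != nil {
		panic(err)
	}
	got, err := dao.GetCVEWhitelist(42)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Items[0].CVEID) // CVE-2019-0001
}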

View File

@ -0,0 +1,55 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"github.com/goharbor/harbor/src/common/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
)
func TestUpdateAndGetCVEWhitelist(t *testing.T) {
require.Nil(t, ClearTable("cve_whitelist"))
l2, err := GetCVEWhitelist(5)
assert.Nil(t, err)
assert.Nil(t, l2)
longList := []models.CVEWhitelistItem{}
for i := 0; i < 50; i++ {
longList = append(longList, models.CVEWhitelistItem{CVEID: "CVE-1999-0067"})
}
e := int64(1573254000)
in1 := models.CVEWhitelist{ProjectID: 3, Items: longList, ExpiresAt: &e}
_, err = UpdateCVEWhitelist(in1)
require.Nil(t, err)
// assert.Equal(t, int64(1), n)
out1, err := GetCVEWhitelist(3)
require.Nil(t, err)
assert.Equal(t, int64(3), out1.ProjectID)
assert.Equal(t, longList, out1.Items)
assert.Equal(t, e, *out1.ExpiresAt)
sysCVEs := []models.CVEWhitelistItem{
{CVEID: "CVE-2019-10164"},
{CVEID: "CVE-2017-12345"},
}
in3 := models.CVEWhitelist{Items: sysCVEs}
_, err = UpdateCVEWhitelist(in3)
require.Nil(t, err)
require.Nil(t, ClearTable("cve_whitelist"))
}

View File

@ -302,9 +302,6 @@ func TestListUsers(t *testing.T) {
if err != nil {
t.Errorf("Error occurred in ListUsers: %v", err)
}
if len(users) != 1 {
t.Errorf("Expect one user in list, but the acutal length is %d, the list: %+v", len(users), users)
}
users2, err := ListUsers(&models.UserQuery{Username: username})
if len(users2) != 1 {
t.Errorf("Expect one user in list, but the acutal length is %d, the list: %+v", len(users), users)
@ -1035,3 +1032,53 @@ func TestIsDupRecError(t *testing.T) {
assert.True(t, isDupRecErr(fmt.Errorf("pq: duplicate key value violates unique constraint \"properties_k_key\"")))
assert.False(t, isDupRecErr(fmt.Errorf("other error")))
}
func TestWithTransaction(t *testing.T) {
reference := "transaction"
quota := models.Quota{
Reference: reference,
ReferenceID: "1",
Hard: "{}",
}
failed := func(o orm.Ormer) error {
o.Insert(&quota)
return fmt.Errorf("failed")
}
var quotaID int64
success := func(o orm.Ormer) error {
id, err := o.Insert(&quota)
if err != nil {
return err
}
quotaID = id
return nil
}
assert := assert.New(t)
if assert.Error(WithTransaction(failed)) {
var quota models.Quota
quota.Reference = reference
quota.ReferenceID = "1"
err := GetOrmer().Read(&quota, "reference", "reference_id")
assert.Error(err)
assert.False(quota.ID != 0)
}
if assert.Nil(WithTransaction(success)) {
var quota models.Quota
quota.Reference = reference
quota.ReferenceID = "1"
err := GetOrmer().Read(&quota, "reference", "reference_id")
assert.Nil(err)
assert.True(quota.ID != 0)
assert.Equal(quotaID, quota.ID)
GetOrmer().Delete(&models.Quota{ID: quotaID}, "id")
}
}

View File

@ -18,23 +18,35 @@ import (
"strings"
"time"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/utils"
"fmt"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/pkg/errors"
)
// ErrGroupNameDup ...
var ErrGroupNameDup = errors.New("duplicated user group name")
// AddUserGroup - Add User Group
func AddUserGroup(userGroup models.UserGroup) (int, error) {
userGroupList, err := QueryUserGroup(models.UserGroup{GroupName: userGroup.GroupName, GroupType: common.HTTPGroupType})
if err != nil {
return 0, err
}
if len(userGroupList) > 0 {
return 0, ErrGroupNameDup
}
o := dao.GetOrmer()
sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?) RETURNING id"
var id int
now := time.Now()
err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id)
err = o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id)
if err != nil {
return 0, err
}
@ -47,10 +59,10 @@ func QueryUserGroup(query models.UserGroup) ([]*models.UserGroup, error) {
o := dao.GetOrmer()
sql := `select id, group_name, group_type, ldap_group_dn from user_group where 1=1 `
sqlParam := make([]interface{}, 1)
groups := []*models.UserGroup{}
var groups []*models.UserGroup
if len(query.GroupName) != 0 {
sql += ` and group_name like ? `
sqlParam = append(sqlParam, `%`+dao.Escape(query.GroupName)+`%`)
sql += ` and group_name = ? `
sqlParam = append(sqlParam, query.GroupName)
}
if query.GroupType != 0 {
@ -86,6 +98,27 @@ func GetUserGroup(id int) (*models.UserGroup, error) {
return nil, nil
}
// GetGroupIDByGroupName - Return the group IDs for the given group names. Fewer IDs than names may be returned if some groups do not exist.
func GetGroupIDByGroupName(groupName []string, groupType int) ([]int, error) {
var retGroupID []int
var conditions []string
if len(groupName) == 0 {
return retGroupID, nil
}
for _, gName := range groupName {
con := "'" + gName + "'"
conditions = append(conditions, con)
}
sql := fmt.Sprintf("select id from user_group where group_name in ( %s ) and group_type = %v", strings.Join(conditions, ","), groupType)
o := dao.GetOrmer()
cnt, err := o.Raw(sql).QueryRows(&retGroupID)
if err != nil {
return retGroupID, err
}
log.Debugf("Found rows %v", cnt)
return retGroupID, nil
}
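Note that the query above interpolates quoted group names directly into the SQL string; a parameterized sketch of the same lookup, reusing dao.ParamPlaceholderForIn and the file's existing fmt and dao imports, would sidestep any quoting pitfalls (the helper name is hypothetical):

// getGroupIDByGroupNameParam is a hypothetical parameterized variant
// of the lookup above.
func getGroupIDByGroupNameParam(groupNames []string, groupType int) ([]int, error) {
	var ids []int
	if len(groupNames) == 0 {
		return ids, nil
	}
	sql := fmt.Sprintf(
		"select id from user_group where group_name in ( %s ) and group_type = ?",
		dao.ParamPlaceholderForIn(len(groupNames)))
	params := make([]interface{}, 0, len(groupNames)+1)
	for _, n := range groupNames {
		params = append(params, n)
	}
	params = append(params, groupType)
	_, err := dao.GetOrmer().Raw(sql, params...).QueryRows(&ids)
	return ids, err
}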
// DeleteUserGroup ...
func DeleteUserGroup(id int) error {
userGroup := models.UserGroup{ID: id}
@ -111,11 +144,7 @@ func UpdateUserGroupName(id int, groupName string) error {
return err
}
// OnBoardUserGroup will check if a usergroup exists in usergroup table, if not insert the usergroup and
// put the id in the pointer of usergroup model, if it does exist, return the usergroup's profile.
// This is used for LDAP and UAA authentication, so that the usergroup can have an ID in Harbor.
// keyAttribute and combinedKeyAttributes are the key columns used to check for duplicate usergroups in Harbor.
func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error {
func onBoardCommonUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error {
g.LdapGroupDN = utils.TrimLower(g.LdapGroupDN)
o := dao.GetOrmer()
@ -140,19 +169,11 @@ func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttri
return nil
}
// GetGroupDNQueryCondition gets the part of the IN ('XXX', 'XXX') condition
func GetGroupDNQueryCondition(userGroupList []*models.UserGroup) string {
result := make([]string, 0)
count := 0
for _, userGroup := range userGroupList {
if userGroup.GroupType == common.LdapGroupType {
result = append(result, "'"+userGroup.LdapGroupDN+"'")
count++
}
// OnBoardUserGroup checks if a usergroup exists in the usergroup table; if not, it inserts the usergroup and
// stores the new ID in the usergroup model. If it does exist, it returns the existing usergroup's profile.
func OnBoardUserGroup(g *models.UserGroup) error {
if g.GroupType == common.LDAPGroupType {
return onBoardCommonUserGroup(g, "LdapGroupDN", "GroupType")
}
// No LDAP Group found
if count == 0 {
return ""
}
return strings.Join(result, ",")
return onBoardCommonUserGroup(g, "GroupName", "GroupType")
}
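A usage sketch for the simplified signature (the group name and DN are illustrative); for LDAP groups the DN is the de-duplication key, otherwise the group name is:

package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common"
	"github.com/goharbor/harbor/src/common/dao/group"
	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	g := &models.UserGroup{
		GroupName:   "harbor_devs",
		GroupType:   common.LDAPGroupType,
		LdapGroupDN: "cn=harbor_devs,ou=groups,dc=example,dc=com",
	}
	if err := group.OnBoardUserGroup(g); err != nil {
		panic(err)
	}
	fmt.Println("group ID:", g.ID) // populated whether created or found
}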

View File

@ -17,6 +17,7 @@ package group
import (
"fmt"
"os"
"reflect"
"testing"
"github.com/goharbor/harbor/src/common"
@ -46,20 +47,30 @@ func TestMain(m *testing.M) {
// Extract to test utils
initSqls := []string{
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into harbor_user (username, email, password, realname) values ('grouptestu09', 'grouptestu09@example.com', '123456', 'grouptestu09')",
"insert into project (name, owner_id) values ('member_test_01', 1)",
`insert into project (name, owner_id) values ('group_project2', 1)`,
`insert into project (name, owner_id) values ('group_project_private', 1)`,
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com')",
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_http_group', 2, '')",
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_myhttp_group', 2, '')",
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_http_group'), 'g', 4)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_myhttp_group'), 'g', 4)",
}
clearSqls := []string{
"delete from project where name='member_test_01'",
"delete from harbor_user where username='member_test_01' or username='pm_sample'",
"delete from project where name='group_project2'",
"delete from project where name='group_project_private'",
"delete from harbor_user where username='member_test_01' or username='pm_sample' or username='grouptestu09'",
"delete from user_group",
"delete from project_member",
}
dao.PrepareTestData(clearSqls, initSqls)
dao.ExecuteBatchSQL(initSqls)
defer dao.ExecuteBatchSQL(clearSqls)
result = m.Run()
@ -80,7 +91,7 @@ func TestAddUserGroup(t *testing.T) {
want int
wantErr bool
}{
{"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LdapGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false},
{"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LDAPGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false},
{"Insert other user group", args{userGroup: models.UserGroup{GroupName: "other_group", GroupType: 3, LdapGroupDN: "other information"}}, 0, false},
}
for _, tt := range tests {
@ -108,8 +119,8 @@ func TestQueryUserGroup(t *testing.T) {
wantErr bool
}{
{"Query all user group", args{query: models.UserGroup{GroupName: "test_group_01"}}, 1, false},
{"Query all ldap group", args{query: models.UserGroup{GroupType: common.LdapGroupType}}, 2, false},
{"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LdapGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false},
{"Query all ldap group", args{query: models.UserGroup{GroupType: common.LDAPGroupType}}, 2, false},
{"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@ -126,7 +137,7 @@ func TestQueryUserGroup(t *testing.T) {
}
func TestGetUserGroup(t *testing.T) {
userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LdapGroupType, LdapGroupDN: "ldap_dn_string"}
userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LDAPGroupType, LdapGroupDN: "ldap_dn_string"}
result, err := AddUserGroup(userGroup)
if err != nil {
t.Errorf("Error occurred when AddUserGroup: %v", err)
@ -175,7 +186,7 @@ func TestUpdateUserGroup(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fmt.Printf("id=%v", createdUserGroupID)
fmt.Printf("id=%v\n", createdUserGroupID)
if err := UpdateUserGroupName(tt.args.id, tt.args.groupName); (err != nil) != tt.wantErr {
t.Errorf("UpdateUserGroup() error = %v, wantErr %v", err, tt.wantErr)
userGroup, err := GetUserGroup(tt.args.id)
@ -231,65 +242,30 @@ func TestOnBoardUserGroup(t *testing.T) {
args{g: &models.UserGroup{
GroupName: "harbor_example",
LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com",
GroupType: common.LdapGroupType}},
GroupType: common.LDAPGroupType}},
false},
{"OnBoardUserGroup second time",
args{g: &models.UserGroup{
GroupName: "harbor_example",
LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com",
GroupType: common.LdapGroupType}},
GroupType: common.LDAPGroupType}},
false},
{"OnBoardUserGroup HTTP user group",
args{g: &models.UserGroup{
GroupName: "test_myhttp_group",
GroupType: common.HTTPGroupType}},
false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := OnBoardUserGroup(tt.args.g, "LdapGroupDN", "GroupType"); (err != nil) != tt.wantErr {
if err := OnBoardUserGroup(tt.args.g); (err != nil) != tt.wantErr {
t.Errorf("OnBoardUserGroup() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestGetGroupDNQueryCondition(t *testing.T) {
userGroupList := []*models.UserGroup{
{
GroupName: "sample1",
GroupType: 1,
LdapGroupDN: "cn=sample1_users,ou=groups,dc=example,dc=com",
},
{
GroupName: "sample2",
GroupType: 1,
LdapGroupDN: "cn=sample2_users,ou=groups,dc=example,dc=com",
},
{
GroupName: "sample3",
GroupType: 0,
LdapGroupDN: "cn=sample3_users,ou=groups,dc=example,dc=com",
},
}
groupQueryConditions := GetGroupDNQueryCondition(userGroupList)
expectedConditions := `'cn=sample1_users,ou=groups,dc=example,dc=com','cn=sample2_users,ou=groups,dc=example,dc=com'`
if groupQueryConditions != expectedConditions {
t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", expectedConditions, groupQueryConditions)
}
var userGroupList2 []*models.UserGroup
groupQueryCondition2 := GetGroupDNQueryCondition(userGroupList2)
if len(groupQueryCondition2) > 0 {
t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition2)
}
groupQueryCondition3 := GetGroupDNQueryCondition(nil)
if len(groupQueryCondition3) > 0 {
t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition3)
}
}
func TestGetGroupProjects(t *testing.T) {
userID, err := dao.Register(models.User{
Username: "grouptestu09",
Email: "grouptest09@example.com",
Password: "Harbor123456",
})
defer dao.DeleteUser(int(userID))
projectID1, err := dao.AddProject(models.Project{
Name: "grouptest01",
OwnerID: 1,
@ -307,7 +283,7 @@ func TestGetGroupProjects(t *testing.T) {
}
defer dao.DeleteProject(projectID2)
groupID, err := AddUserGroup(models.UserGroup{
GroupName: "test_group_01",
GroupName: "test_group_03",
GroupType: 1,
LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
})
@ -322,8 +298,7 @@ func TestGetGroupProjects(t *testing.T) {
})
defer project.DeleteProjectMemberByID(pmid)
type args struct {
groupDNCondition string
query *models.ProjectQueryParam
query *models.ProjectQueryParam
}
member := &models.MemberQuery{
Name: "grouptestu09",
@ -335,19 +310,17 @@ func TestGetGroupProjects(t *testing.T) {
wantErr bool
}{
{"Query with group DN",
args{"'cn=harbor_users,ou=groups,dc=example,dc=com'",
&models.ProjectQueryParam{
Member: member,
}},
args{&models.ProjectQueryParam{
Member: member,
}},
1, false},
{"Query without group DN",
args{"",
&models.ProjectQueryParam{}},
args{&models.ProjectQueryParam{}},
1, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := dao.GetGroupProjects(tt.args.groupDNCondition, tt.args.query)
got, err := dao.GetGroupProjects([]int{groupID}, tt.args.query)
if (err != nil) != tt.wantErr {
t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
return
@ -377,7 +350,7 @@ func TestGetTotalGroupProjects(t *testing.T) {
}
defer dao.DeleteProject(projectID2)
groupID, err := AddUserGroup(models.UserGroup{
GroupName: "test_group_01",
GroupName: "test_group_05",
GroupType: 1,
LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
})
@ -392,8 +365,7 @@ func TestGetTotalGroupProjects(t *testing.T) {
})
defer project.DeleteProjectMemberByID(pmid)
type args struct {
groupDNCondition string
query *models.ProjectQueryParam
query *models.ProjectQueryParam
}
tests := []struct {
name string
@ -401,18 +373,16 @@ func TestGetTotalGroupProjects(t *testing.T) {
wantSize int
wantErr bool
}{
{"Query with group DN",
args{"'cn=harbor_users,ou=groups,dc=example,dc=com'",
&models.ProjectQueryParam{}},
{"Query with group ID",
args{&models.ProjectQueryParam{}},
1, false},
{"Query without group DN",
args{"",
&models.ProjectQueryParam{}},
{"Query without group ID",
args{&models.ProjectQueryParam{}},
1, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := dao.GetTotalGroupProjects(tt.args.groupDNCondition, tt.args.query)
got, err := dao.GetTotalGroupProjects([]int{groupID}, tt.args.query)
if (err != nil) != tt.wantErr {
t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
return
@ -423,3 +393,90 @@ func TestGetTotalGroupProjects(t *testing.T) {
})
}
}
func TestGetRolesByLDAPGroup(t *testing.T) {
userGroupList, err := QueryUserGroup(models.UserGroup{LdapGroupDN: "cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com", GroupType: 1})
if err != nil || len(userGroupList) < 1 {
t.Errorf("failed to query user group, err %v", err)
}
gl2, err2 := GetGroupIDByGroupName([]string{"test_http_group", "test_myhttp_group"}, common.HTTPGroupType)
if err2 != nil || len(gl2) != 2 {
t.Errorf("failed to query http user group, err %v", err)
}
project, err := dao.GetProjectByName("member_test_01")
if err != nil {
t.Errorf("Error occurred when Get project by name: %v", err)
}
privateProject, err := dao.GetProjectByName("group_project_private")
if err != nil {
t.Errorf("Error occurred when Get project by name: %v", err)
}
type args struct {
projectID int64
groupIDs []int
}
tests := []struct {
name string
args args
wantSize int
wantErr bool
}{
{"Check normal", args{projectID: project.ProjectID, groupIDs: []int{userGroupList[0].ID, gl2[0], gl2[1]}}, 2, false},
{"Check non exist", args{projectID: privateProject.ProjectID, groupIDs: []int{9999}}, 0, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := dao.GetRolesByGroupID(tt.args.projectID, tt.args.groupIDs)
if (err != nil) != tt.wantErr {
t.Errorf("TestGetRolesByLDAPGroup() error = %v, wantErr %v", err, tt.wantErr)
return
}
if len(got) != tt.wantSize {
t.Errorf("TestGetRolesByLDAPGroup() = %v, want %v", len(got), tt.wantSize)
}
})
}
}
func TestGetGroupIDByGroupName(t *testing.T) {
groupList, err := QueryUserGroup(models.UserGroup{GroupName: "test_http_group", GroupType: 2})
if err != nil {
t.Error(err)
}
if len(groupList) < 0 {
t.Error(err)
}
groupList2, err := QueryUserGroup(models.UserGroup{GroupName: "test_myhttp_group", GroupType: 2})
if err != nil {
t.Error(err)
}
if len(groupList2) < 0 {
t.Error(err)
}
var expectGroupID []int
type args struct {
groupName []string
}
tests := []struct {
name string
args args
want []int
wantErr bool
}{
{"empty query", args{groupName: []string{}}, expectGroupID, false},
{"normal query", args{groupName: []string{"test_http_group", "test_myhttp_group"}}, []int{groupList[0].ID, groupList2[0].ID}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GetGroupIDByGroupName(tt.args.groupName, common.HTTPGroupType)
if (err != nil) != tt.wantErr {
t.Errorf("GetHTTPGroupIDByGroupName() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetHTTPGroupIDByGroupName() = %#v, want %#v", got, tt.want)
}
})
}
}

View File

@ -0,0 +1,122 @@
package notification
import (
"fmt"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/pkg/errors"
)
// UpdateNotificationJob updates the notification job
func UpdateNotificationJob(job *models.NotificationJob, props ...string) (int64, error) {
if job == nil {
return 0, errors.New("nil job")
}
if job.ID == 0 {
return 0, fmt.Errorf("notification job ID is empty")
}
o := dao.GetOrmer()
return o.Update(job, props...)
}
// AddNotificationJob inserts a new notification job into the DB
func AddNotificationJob(job *models.NotificationJob) (int64, error) {
if job == nil {
return 0, errors.New("nil job")
}
o := dao.GetOrmer()
if len(job.Status) == 0 {
job.Status = models.JobPending
}
return o.Insert(job)
}
// GetNotificationJob ...
func GetNotificationJob(id int64) (*models.NotificationJob, error) {
o := dao.GetOrmer()
j := &models.NotificationJob{
ID: id,
}
err := o.Read(j)
if err == orm.ErrNoRows {
return nil, nil
}
return j, nil
}
// GetTotalCountOfNotificationJobs ...
func GetTotalCountOfNotificationJobs(query ...*models.NotificationJobQuery) (int64, error) {
qs := notificationJobQueryConditions(query...)
return qs.Count()
}
// GetNotificationJobs ...
func GetNotificationJobs(query ...*models.NotificationJobQuery) ([]*models.NotificationJob, error) {
var jobs []*models.NotificationJob
qs := notificationJobQueryConditions(query...)
if len(query) > 0 && query[0] != nil {
qs = dao.PaginateForQuerySetter(qs, query[0].Page, query[0].Size)
}
qs = qs.OrderBy("-UpdateTime")
_, err := qs.All(&jobs)
return jobs, err
}
// GetLastTriggerJobsGroupByEventType gets the notification job info of a policy, including event type and last trigger time
func GetLastTriggerJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) {
o := dao.GetOrmer()
// get the job last triggered (created) per event_type. Postgres GROUP BY usage reference:
// https://stackoverflow.com/questions/13325583/postgresql-max-and-group-by
sql := `select distinct on (event_type) event_type, id, creation_time, status, notify_type, job_uuid, update_time,
creation_time, job_detail from notification_job where policy_id = ?
order by event_type, id desc, creation_time, status, notify_type, job_uuid, update_time, creation_time, job_detail`
jobs := []*models.NotificationJob{}
_, err := o.Raw(sql, policyID).QueryRows(&jobs)
if err != nil {
log.Errorf("query last trigger info group by event type failed: %v", err)
return nil, err
}
return jobs, nil
}
// DeleteNotificationJob ...
func DeleteNotificationJob(id int64) error {
o := dao.GetOrmer()
_, err := o.Delete(&models.NotificationJob{ID: id})
return err
}
// DeleteAllNotificationJobsByPolicyID ...
func DeleteAllNotificationJobsByPolicyID(policyID int64) (int64, error) {
o := dao.GetOrmer()
return o.Delete(&models.NotificationJob{PolicyID: policyID}, "policy_id")
}
func notificationJobQueryConditions(query ...*models.NotificationJobQuery) orm.QuerySeter {
qs := dao.GetOrmer().QueryTable(&models.NotificationJob{})
if len(query) == 0 || query[0] == nil {
return qs
}
q := query[0]
if q.PolicyID != 0 {
qs = qs.Filter("PolicyID", q.PolicyID)
}
if len(q.Statuses) > 0 {
qs = qs.Filter("Status__in", q.Statuses)
}
if len(q.EventTypes) > 0 {
qs = qs.Filter("EventType__in", q.EventTypes)
}
return qs
}
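A sketch of listing jobs through these query conditions (the policy ID and paging values are illustrative):

package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/dao/notification"
	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	// First page of pending jobs for one policy, newest first.
	jobs, err := notification.GetNotificationJobs(&models.NotificationJobQuery{
		PolicyID: 111,
		Statuses: []string{models.JobPending},
		Page:     1,
		Size:     20,
	})
	if err != nil {
		panic(err)
	}
	for _, j := range jobs {
		fmt.Println(j.ID, j.EventType, j.Status)
	}
}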

View File

@ -0,0 +1,263 @@
package notification
import (
"testing"
"github.com/goharbor/harbor/src/common/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
testJob1 = &models.NotificationJob{
PolicyID: 1111,
EventType: "pushImage",
NotifyType: "http",
Status: "pending",
JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563536782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
UUID: "00000000",
}
testJob2 = &models.NotificationJob{
PolicyID: 111,
EventType: "pullImage",
NotifyType: "http",
Status: "",
JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563537782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
UUID: "00000000",
}
testJob3 = &models.NotificationJob{
PolicyID: 111,
EventType: "deleteImage",
NotifyType: "http",
Status: "pending",
JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563538782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
UUID: "00000000",
}
)
func TestAddNotificationJob(t *testing.T) {
tests := []struct {
name string
job *models.NotificationJob
want int64
wantErr bool
}{
{name: "AddNotificationJob nil", job: nil, wantErr: true},
{name: "AddNotificationJob 1", job: testJob1, want: 1},
{name: "AddNotificationJob 2", job: testJob2, want: 2},
{name: "AddNotificationJob 3", job: testJob3, want: 3},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := AddNotificationJob(tt.job)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
assert.Equal(t, tt.want, got)
})
}
}
func TestGetTotalCountOfNotificationJobs(t *testing.T) {
type args struct {
query *models.NotificationJobQuery
}
tests := []struct {
name string
args args
want int64
wantErr bool
}{
{
name: "GetTotalCountOfNotificationJobs 1",
args: args{
query: &models.NotificationJobQuery{
PolicyID: 111,
},
},
want: 2,
},
{
name: "GetTotalCountOfNotificationJobs 2",
args: args{},
want: 3,
},
{
name: "GetTotalCountOfNotificationJobs 3",
args: args{
query: &models.NotificationJobQuery{
Statuses: []string{"pending"},
},
},
want: 3,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GetTotalCountOfNotificationJobs(tt.args.query)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
assert.Equal(t, tt.want, got)
})
}
}
func TestGetLastTriggerJobsGroupByEventType(t *testing.T) {
type args struct {
policyID int64
}
tests := []struct {
name string
args args
want []*models.NotificationJob
wantErr bool
}{
{
name: "GetLastTriggerJobsGroupByEventType",
args: args{
policyID: 111,
},
want: []*models.NotificationJob{
testJob2,
testJob3,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GetLastTriggerJobsGroupByEventType(tt.args.policyID)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
assert.Equal(t, len(tt.want), len(got))
})
}
}
func TestUpdateNotificationJob(t *testing.T) {
type args struct {
job *models.NotificationJob
props []string
}
tests := []struct {
name string
args args
want int64
wantErr bool
}{
{name: "UpdateNotificationJob Want Error 1", args: args{job: nil}, wantErr: true},
{name: "UpdateNotificationJob Want Error 2", args: args{job: &models.NotificationJob{ID: 0}}, wantErr: true},
{
name: "UpdateNotificationJob 1",
args: args{
job: &models.NotificationJob{ID: 1, UUID: "111111111111111"},
props: []string{"UUID"},
},
},
{
name: "UpdateNotificationJob 2",
args: args{
job: &models.NotificationJob{ID: 2, UUID: "222222222222222"},
props: []string{"UUID"},
},
},
{
name: "UpdateNotificationJob 3",
args: args{
job: &models.NotificationJob{ID: 3, UUID: "333333333333333"},
props: []string{"UUID"},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := UpdateNotificationJob(tt.args.job, tt.args.props...)
if tt.wantErr {
require.NotNil(t, err, "Error: %s", err)
return
}
require.Nil(t, err)
gotJob, err := GetNotificationJob(tt.args.job.ID)
require.Nil(t, err)
assert.Equal(t, tt.args.job.UUID, gotJob.UUID)
})
}
}
func TestDeleteNotificationJob(t *testing.T) {
type args struct {
id int64
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "DeleteNotificationJob 1", args: args{id: 1}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := DeleteNotificationJob(tt.args.id)
if tt.wantErr {
require.NotNil(t, err, "Error: %s", err)
return
}
require.Nil(t, err)
job, err := GetNotificationJob(tt.args.id)
require.Nil(t, err)
assert.Nil(t, job)
})
}
}
func TestDeleteAllNotificationJobs(t *testing.T) {
type args struct {
policyID int64
query []*models.NotificationJobQuery
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "DeleteAllNotificationJobs 1",
args: args{
policyID: 111,
query: []*models.NotificationJobQuery{
{PolicyID: 111},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := DeleteAllNotificationJobsByPolicyID(tt.args.policyID)
if tt.wantErr {
require.NotNil(t, err, "Error: %s", err)
return
}
require.Nil(t, err)
jobs, err := GetNotificationJobs(tt.args.query...)
require.Nil(t, err)
assert.Equal(t, 0, len(jobs))
})
}
}

View File

@ -0,0 +1,69 @@
package notification
import (
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/pkg/errors"
)
// GetNotificationPolicy returns the notification policy by id
func GetNotificationPolicy(id int64) (*models.NotificationPolicy, error) {
policy := new(models.NotificationPolicy)
o := dao.GetOrmer()
err := o.QueryTable(policy).Filter("id", id).One(policy)
if err == orm.ErrNoRows {
return nil, nil
}
return policy, err
}
// GetNotificationPolicyByName returns the notification policy by name
func GetNotificationPolicyByName(name string, projectID int64) (*models.NotificationPolicy, error) {
policy := new(models.NotificationPolicy)
o := dao.GetOrmer()
err := o.QueryTable(policy).Filter("name", name).Filter("projectID", projectID).One(policy)
if err == orm.ErrNoRows {
return nil, nil
}
return policy, err
}
// GetNotificationPolicies returns all notification policies in the project
func GetNotificationPolicies(projectID int64) ([]*models.NotificationPolicy, error) {
var policies []*models.NotificationPolicy
qs := dao.GetOrmer().QueryTable(new(models.NotificationPolicy)).Filter("ProjectID", projectID)
_, err := qs.All(&policies)
if err != nil {
return nil, err
}
return policies, nil
}
// AddNotificationPolicy inserts a new notification policy into the DB
func AddNotificationPolicy(policy *models.NotificationPolicy) (int64, error) {
if policy == nil {
return 0, errors.New("nil policy")
}
o := dao.GetOrmer()
return o.Insert(policy)
}
// UpdateNotificationPolicy updates the specified notification policy
func UpdateNotificationPolicy(policy *models.NotificationPolicy) error {
if policy == nil {
return errors.New("nil policy")
}
o := dao.GetOrmer()
_, err := o.Update(policy)
return err
}
// DeleteNotificationPolicy deletes the notification policy by id
func DeleteNotificationPolicy(id int64) error {
o := dao.GetOrmer()
_, err := o.Delete(&models.NotificationPolicy{ID: id})
return err
}

View File

@ -0,0 +1,291 @@
package notification
import (
"testing"
"time"
"github.com/goharbor/harbor/src/common/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
testPly1 = &models.NotificationPolicy{
Name: "webhook test policy1",
Description: "webhook test policy1 description",
ProjectID: 111,
TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
Creator: "no one",
CreationTime: time.Now(),
UpdateTime: time.Now(),
Enabled: true,
}
)
var (
testPly2 = &models.NotificationPolicy{
Name: "webhook test policy2",
Description: "webhook test policy2 description",
ProjectID: 222,
TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
Creator: "no one",
CreationTime: time.Now(),
UpdateTime: time.Now(),
Enabled: true,
}
)
var (
testPly3 = &models.NotificationPolicy{
Name: "webhook test policy3",
Description: "webhook test policy3 description",
ProjectID: 333,
TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
Creator: "no one",
CreationTime: time.Now(),
UpdateTime: time.Now(),
Enabled: true,
}
)
func TestAddNotificationPolicy(t *testing.T) {
tests := []struct {
name string
policy *models.NotificationPolicy
want int64
wantErr bool
}{
{name: "AddNotificationPolicy nil", policy: nil, wantErr: true},
{name: "AddNotificationPolicy 1", policy: testPly1, want: 1},
{name: "AddNotificationPolicy 2", policy: testPly2, want: 2},
{name: "AddNotificationPolicy 3", policy: testPly3, want: 3},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := AddNotificationPolicy(tt.policy)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
assert.Equal(t, tt.want, got)
})
}
}
func TestGetNotificationPolicies(t *testing.T) {
tests := []struct {
name string
projectID int64
wantPolicies []*models.NotificationPolicy
wantErr bool
}{
{name: "GetNotificationPolicies nil", projectID: 0, wantPolicies: []*models.NotificationPolicy{}},
{name: "GetNotificationPolicies 1", projectID: 111, wantPolicies: []*models.NotificationPolicy{testPly1}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotPolicies, err := GetNotificationPolicies(tt.projectID)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
for i, gotPolicy := range gotPolicies {
assert.Equal(t, tt.wantPolicies[i].Name, gotPolicy.Name)
assert.Equal(t, tt.wantPolicies[i].ID, gotPolicy.ID)
assert.Equal(t, tt.wantPolicies[i].EventTypesDB, gotPolicy.EventTypesDB)
assert.Equal(t, tt.wantPolicies[i].TargetsDB, gotPolicy.TargetsDB)
assert.Equal(t, tt.wantPolicies[i].Creator, gotPolicy.Creator)
assert.Equal(t, tt.wantPolicies[i].Enabled, gotPolicy.Enabled)
assert.Equal(t, tt.wantPolicies[i].Description, gotPolicy.Description)
}
})
}
}
func TestGetNotificationPolicy(t *testing.T) {
tests := []struct {
name string
id int64
wantPolicy *models.NotificationPolicy
wantErr bool
}{
{name: "GetRepPolicy 1", id: 1, wantPolicy: testPly1},
{name: "GetRepPolicy 2", id: 2, wantPolicy: testPly2},
{name: "GetRepPolicy 3", id: 3, wantPolicy: testPly3},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotPolicy, err := GetNotificationPolicy(tt.id)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name)
assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID)
assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB)
assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB)
assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator)
assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled)
assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description)
})
}
}
func TestGetNotificationPolicyByName(t *testing.T) {
type args struct {
name string
projectID int64
}
tests := []struct {
name string
args args
wantPolicy *models.NotificationPolicy
wantErr bool
}{
{name: "GetNotificationPolicyByName 1", args: args{name: testPly1.Name, projectID: testPly1.ProjectID}, wantPolicy: testPly1},
{name: "GetNotificationPolicyByName 2", args: args{name: testPly2.Name, projectID: testPly2.ProjectID}, wantPolicy: testPly2},
{name: "GetNotificationPolicyByName 3", args: args{name: testPly3.Name, projectID: testPly3.ProjectID}, wantPolicy: testPly3},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotPolicy, err := GetNotificationPolicyByName(tt.args.name, tt.args.projectID)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name)
assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID)
assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB)
assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB)
assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator)
assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled)
assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description)
})
}
}
func TestUpdateNotificationPolicy(t *testing.T) {
type args struct {
policy *models.NotificationPolicy
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "UpdateNotificationPolicy nil",
args: args{
policy: nil,
},
wantErr: true,
},
{
name: "UpdateNotificationPolicy 1",
args: args{
policy: &models.NotificationPolicy{
ID: 1,
Name: "webhook test policy1 new",
Description: "webhook test policy1 description new",
ProjectID: 111,
TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
Creator: "no one",
CreationTime: time.Now(),
UpdateTime: time.Now(),
Enabled: true,
},
},
},
{
name: "UpdateNotificationPolicy 2",
args: args{
policy: &models.NotificationPolicy{
ID: 2,
Name: "webhook test policy2 new",
Description: "webhook test policy2 description new",
ProjectID: 222,
TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
Creator: "no one",
CreationTime: time.Now(),
UpdateTime: time.Now(),
Enabled: true,
},
},
},
{
name: "UpdateNotificationPolicy 3",
args: args{
policy: &models.NotificationPolicy{
ID: 3,
Name: "webhook test policy3 new",
Description: "webhook test policy3 description new",
ProjectID: 333,
TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
Creator: "no one",
CreationTime: time.Now(),
UpdateTime: time.Now(),
Enabled: true,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := UpdateNotificationPolicy(tt.args.policy)
if tt.wantErr {
require.NotNil(t, err, "Error: %s", err)
return
}
require.Nil(t, err)
gotPolicy, err := GetNotificationPolicy(tt.args.policy.ID)
require.Nil(t, err)
assert.Equal(t, tt.args.policy.Description, gotPolicy.Description)
assert.Equal(t, tt.args.policy.Name, gotPolicy.Name)
})
}
}
func TestDeleteNotificationPolicy(t *testing.T) {
tests := []struct {
name string
id int64
wantErr bool
}{
{name: "DeleteNotificationPolicy 1", id: 1, wantErr: false},
{name: "DeleteNotificationPolicy 2", id: 2, wantErr: false},
{name: "DeleteNotificationPolicy 3", id: 3, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := DeleteNotificationPolicy(tt.id)
if tt.wantErr {
require.NotNil(t, err, "wantErr: %s", err)
return
}
require.Nil(t, err)
policy, err := GetNotificationPolicy(tt.id)
require.Nil(t, err)
assert.Nil(t, policy)
})
}
}

View File

@ -0,0 +1,13 @@
package notification
import (
"os"
"testing"
"github.com/goharbor/harbor/src/common/dao"
)
func TestMain(m *testing.M) {
dao.PrepareTestForPostgresSQL()
os.Exit(m.Run())
}

View File

@ -16,6 +16,7 @@ package dao
import (
"fmt"
"net/url"
"os"
"github.com/astaxie/beego/orm"
@ -31,12 +32,14 @@ import (
const defaultMigrationPath = "migrations/postgresql/"
type pgsql struct {
host string
port string
usr string
pwd string
database string
sslmode string
host string
port string
usr string
pwd string
database string
sslmode string
maxIdleConns int
maxOpenConns int
}
// Name returns the name of PostgreSQL
@ -51,17 +54,19 @@ func (p *pgsql) String() string {
}
// NewPGSQL returns an instance of postgres
func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string) Database {
func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database {
if len(sslmode) == 0 {
sslmode = "disable"
}
return &pgsql{
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
maxIdleConns: maxIdleConns,
maxOpenConns: maxOpenConns,
}
}
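The two new parameters bound beego's connection pool; a construction sketch (host and credentials are illustrative):

package main

import (
	"github.com/goharbor/harbor/src/common/dao"
)

func main() {
	// Keep up to 50 idle connections and cap the pool at 100 open ones.
	db := dao.NewPGSQL("127.0.0.1", "5432", "postgres", "s3cret",
		"registry", "disable", 50, 100)
	if err := db.Register(); err != nil {
		panic(err)
	}
}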
@ -82,19 +87,26 @@ func (p *pgsql) Register(alias ...string) error {
info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
p.host, p.port, p.usr, p.pwd, p.database, p.sslmode)
return orm.RegisterDataBase(an, "postgres", info)
return orm.RegisterDataBase(an, "postgres", info, p.maxIdleConns, p.maxOpenConns)
}
// UpgradeSchema calls the migrate tool to upgrade the schema to the latest version based on the SQL scripts.
func (p *pgsql) UpgradeSchema() error {
dbURL := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=%s", p.usr, p.pwd, p.host, p.port, p.database, p.sslmode)
dbURL := url.URL{
Scheme: "postgres",
User: url.UserPassword(p.usr, p.pwd),
Host: fmt.Sprintf("%s:%s", p.host, p.port),
Path: p.database,
RawQuery: fmt.Sprintf("sslmode=%s", p.sslmode),
}
// For UT
path := os.Getenv("POSTGRES_MIGRATION_SCRIPTS_PATH")
if len(path) == 0 {
path = defaultMigrationPath
}
srcURL := fmt.Sprintf("file://%s", path)
m, err := migrate.New(srcURL, dbURL)
m, err := migrate.New(srcURL, dbURL.String())
if err != nil {
return err
}
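
Two things change in this file: connection-pool limits are threaded through to beego (RegisterDataBase accepts trailing ints, which presumably map to max idle and max open connections), and the migration URL is now built with url.URL instead of fmt.Sprintf. The latter matters when credentials contain URL-significant characters. A minimal sketch with hypothetical credentials:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// '@', ':' and '/' in a password would corrupt a Sprintf-built URL;
	// url.UserPassword percent-escapes them.
	dbURL := url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword("harbor", "p@ss:word/123"),
		Host:     "127.0.0.1:5432",
		Path:     "registry",
		RawQuery: "sslmode=disable",
	}
	fmt.Println(dbURL.String())
	// postgres://harbor:p%40ss%3Aword%2F123@127.0.0.1:5432/registry?sslmode=disable
}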

View File

@ -26,8 +26,8 @@ import (
func AddProjectMetadata(meta *models.ProjectMetadata) error {
now := time.Now()
sql := `insert into project_metadata
(project_id, name, value, creation_time, update_time, deleted)
values (?, ?, ?, ?, ?, false)`
(project_id, name, value, creation_time, update_time)
values (?, ?, ?, ?, ?)`
_, err := GetOrmer().Raw(sql, meta.ProjectID, meta.Name, meta.Value,
now, now).Exec()
return err
@ -38,13 +38,12 @@ func AddProjectMetadata(meta *models.ProjectMetadata) error {
// by name will be deleted
func DeleteProjectMetadata(projectID int64, name ...string) error {
params := make([]interface{}, 1)
sql := `update project_metadata
set deleted = true
sql := `delete from project_metadata
where project_id = ?`
params = append(params, projectID)
if len(name) > 0 {
sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@ -56,7 +55,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error {
func UpdateProjectMetadata(meta *models.ProjectMetadata) error {
sql := `update project_metadata
set value = ?, update_time = ?
where project_id = ? and name = ? and deleted = false`
where project_id = ? and name = ?`
_, err := GetOrmer().Raw(sql, meta.Value, time.Now(), meta.ProjectID,
meta.Name).Exec()
return err
@ -70,11 +69,11 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
params := make([]interface{}, 1)
sql := `select * from project_metadata
where project_id = ? and deleted = false`
where project_id = ? `
params = append(params, projectID)
if len(name) > 0 {
sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@ -82,7 +81,9 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
return proMetas, err
}
func paramPlaceholder(n int) string {
// ParamPlaceholderForIn returns a string that contains placeholders for sql keyword "in"
// e.g. n=3, returns "?,?,?"
func ParamPlaceholderForIn(n int) string {
placeholders := []string{}
for i := 0; i < n; i++ {
placeholders = append(placeholders, "?")
@ -93,7 +94,7 @@ func paramPlaceholder(n int) string {
// ListProjectMetadata ...
func ListProjectMetadata(name, value string) ([]*models.ProjectMetadata, error) {
sql := `select * from project_metadata
where name = ? and value = ? and deleted = false`
where name = ? and value = ? `
metadatas := []*models.ProjectMetadata{}
_, err := GetOrmer().Raw(sql, name, value).QueryRows(&metadatas)
return metadatas, err

View File

@ -156,19 +156,21 @@ func GetProjects(query *models.ProjectQueryParam) ([]*models.Project, error) {
// GetGroupProjects - Get all of a user's projects, both those where the user is a direct member
// and those where the user belongs to a group that is a member of the project.
func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) ([]*models.Project, error) {
func GetGroupProjects(groupIDs []int, query *models.ProjectQueryParam) ([]*models.Project, error) {
sql, params := projectQueryConditions(query)
sql = `select distinct p.project_id, p.name, p.owner_id,
p.creation_time, p.update_time ` + sql
if len(groupDNCondition) > 0 {
groupIDCondition := JoinNumberConditions(groupIDs)
if len(groupIDs) > 0 {
sql = fmt.Sprintf(
`%s union select distinct p.project_id, p.name, p.owner_id, p.creation_time, p.update_time
from project p
left join project_member pm on p.project_id = pm.project_id
left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1
where ug.ldap_group_dn in ( %s ) order by name`,
sql, groupDNCondition)
left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
where ug.id in ( %s )`,
sql, groupIDCondition)
}
sql = sql + ` order by name`
sqlStr, queryParams := CreatePagination(query, sql, params)
log.Debugf("query sql:%v", sql)
var projects []*models.Project
@ -178,10 +180,11 @@ func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam)
// GetTotalGroupProjects - Get the total count of projects where the user is a direct member
// or belongs to a group that is a member of the project.
func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) (int, error) {
func GetTotalGroupProjects(groupIDs []int, query *models.ProjectQueryParam) (int, error) {
var sql string
sqlCondition, params := projectQueryConditions(query)
if len(groupDNCondition) == 0 {
groupIDCondition := JoinNumberConditions(groupIDs)
if len(groupIDs) == 0 {
sql = `select count(1) ` + sqlCondition
} else {
sql = fmt.Sprintf(
@ -189,9 +192,9 @@ func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryPa
from ( select p.project_id %s union select p.project_id
from project p
left join project_member pm on p.project_id = pm.project_id
left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1
where ug.ldap_group_dn in ( %s )) t`,
sqlCondition, groupDNCondition)
left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
where ug.id in ( %s )) t`,
sqlCondition, groupIDCondition)
}
log.Debugf("query sql:%v", sql)
var count int
@ -257,7 +260,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
}
if len(query.ProjectIDs) > 0 {
sql += fmt.Sprintf(` and p.project_id in ( %s )`,
paramPlaceholder(len(query.ProjectIDs)))
ParamPlaceholderForIn(len(query.ProjectIDs)))
params = append(params, query.ProjectIDs)
}
return sql, params
@ -291,29 +294,24 @@ func DeleteProject(id int64) error {
return err
}
// GetRolesByLDAPGroup - Get Project roles of the
// specified group DN is a member of current project
func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error) {
// GetRolesByGroupID - Get the project roles granted to the
// specified groups that are members of the current project
func GetRolesByGroupID(projectID int64, groupIDs []int) ([]int, error) {
var roles []int
if len(groupDNCondition) == 0 {
if len(groupIDs) == 0 {
return roles, nil
}
groupIDCondition := JoinNumberConditions(groupIDs)
o := GetOrmer()
// Because an LDAP user can be a member of multiple groups,
// and privilege descends as the role ID grows (1-admin, 2-developer, 3-guest, 4-master), min selects the most-privileged role.
sql := fmt.Sprintf(
`select min(pm.role) from project_member pm
`select distinct pm.role from project_member pm
left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id
where ug.ldap_group_dn in ( %s ) and pm.project_id = ? `,
groupDNCondition)
log.Debugf("sql:%v", sql)
where ug.id in ( %s ) and pm.project_id = ?`,
groupIDCondition)
log.Debugf("sql for GetRolesByGroupID(project ID: %d, group ids: %v):%v", projectID, groupIDs, sql)
if _, err := o.Raw(sql, projectID).QueryRows(&roles); err != nil {
log.Warningf("Error in GetRolesByLDAPGroup, error: %v", err)
log.Warningf("Error in GetRolesByGroupID, error: %v", err)
return nil, err
}
// If no row is selected, min returns an empty row; guard here to avoid returning 0 as a role
if len(roles) == 1 && roles[0] == 0 {
return []int{}, nil
}
return roles, nil
}
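
JoinNumberConditions is called throughout this file but its body is not part of the diff; judging from the call sites it presumably renders a []int as a comma-separated list that is safe to splice into SQL text (integers cannot carry injection payloads the way user-supplied strings can). A sketch under that assumption:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// joinNumberConditions approximates what dao.JoinNumberConditions
// presumably does: []int{4, 7, 19} -> "4,7,19" for use inside "in ( ... )".
func joinNumberConditions(ids []int) string {
	strs := make([]string, len(ids))
	for i, id := range ids {
		strs[i] = strconv.Itoa(id)
	}
	return strings.Join(strs, ",")
}

func main() {
	groupIDs := []int{4, 7, 19} // hypothetical user-group IDs
	fmt.Printf("where ug.id in ( %s ) and pm.project_id = ?\n", joinNumberConditions(groupIDs))
	// where ug.id in ( 4,7,19 ) and pm.project_id = ?
}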

Some files were not shown because too many files have changed in this diff.