mirror of https://github.com/goharbor/harbor.git

Commit 7d63620d42: Merge branch 'master' into master
.dockerignore
Normal file
1
.dockerignore
Normal file
@ -0,0 +1 @@
|
|||||||
|
src/portal/node_modules/
|

.travis.yml (10 changes)
@@ -1,23 +1,23 @@
 sudo: true
 language: go
 go:
-  - 1.11.2
+  - 1.12.5
 go_import_path: github.com/goharbor/harbor
 services:
   - docker
 dist: trusty
 matrix:
   include:
-    - go: 1.11.2
+    - go: 1.12.5
      env:
        - UTTEST=true
-    - go: 1.11.2
+    - go: 1.12.5
      env:
        - APITEST_DB=true
-    - go: 1.11.2
+    - go: 1.12.5
      env:
        - APITEST_LDAP=true
-    - go: 1.11.2
+    - go: 1.12.5
      env:
        - OFFLINE=true
 env:

CHANGELOG.md (13 changes)
@@ -1,5 +1,18 @@
 # Changelog
 
+## v1.8.0 (2019-05-21)
+[Full list of issues fixed in v1.8.0](https://github.com/goharbor/harbor/issues?q=is%3Aissue+is%3Aclosed+label%3Atarget%2F1.8.0)
+* Support for OpenID Connect - OpenID Connect (OIDC) is an authentication layer on top of OAuth 2.0, allowing Harbor to verify the identity of users based on the authentication performed by an external authorization server or identity provider.
+* Robot accounts - Robot accounts can be configured to provide administrators with a token that can be granted appropriate permissions for pulling or pushing images. Harbor users can continue operating Harbor using their enterprise SSO credentials, and use robot accounts for CI/CD systems that perform Docker client commands.
+* Replication advancements - Harbor new version replication allows you to replicate your Harbor repository to and from non-Harbor registries. Harbor 1.8 expands on the Harbor-to-Harbor replication feature, adding the ability to replicate resources between Harbor and Docker Hub, Docker Registry, and Huawei Registry. This is enabled through both push and pull mode replication.
+* Health check API, showing detailed status and health of all Harbor components.
+* Support for defining cron-based scheduled tasks in the Harbor UI. Administrators can now use cron strings to define the schedule of a job. Scan, garbage collection and replication jobs are all supported.
+* API explorer integration. End users can now explore and trigger Harbor’s API via the swagger UI nested inside Harbor’s UI.
+* Introduce a new master role to project, the role's permissions are more than developer and less than project admin.
+* Introduce harbor.yml as the replacement of harbor.cfg and refactor the prepare script to provide more flexibility to the installation process based on docker-compose
+* Enhancement of the Job Service engine to include webhook events, additional APIs for automation, and numerous bug fixes to improve the stability of the service.
+* Docker Registry upgraded to v2.7.1.
+
 ## v1.7.5 (2019-04-02)
 * Bumped up Clair to v2.0.8
 * Fixed issues in supporting windows images. #6992 #6369

Makefile (21 changes)
@@ -70,7 +70,6 @@ SRCPATH=./src
 TOOLSPATH=$(BUILDPATH)/tools
 CORE_PATH=$(BUILDPATH)/src/core
 PORTAL_PATH=$(BUILDPATH)/src/portal
-GOBASEPATH=/go/src/github.com/goharbor
 CHECKENVCMD=checkenv.sh
 
 # parameters
@@ -101,14 +100,14 @@ PREPARE_VERSION_NAME=versions
 REGISTRYVERSION=v2.7.1-patch-2819
 NGINXVERSION=$(VERSIONTAG)
 NOTARYVERSION=v0.6.1
-CLAIRVERSION=v2.0.8
+CLAIRVERSION=v2.0.9
 CLAIRDBVERSION=$(VERSIONTAG)
 MIGRATORVERSION=$(VERSIONTAG)
 REDISVERSION=$(VERSIONTAG)
 NOTARYMIGRATEVERSION=v3.5.4
 
 # version of chartmuseum
-CHARTMUSEUMVERSION=v0.8.1
+CHARTMUSEUMVERSION=v0.9.0
 
 define VERSIONS_FOR_PREPARE
 VERSION_TAG: $(VERSIONTAG)
@@ -136,10 +135,10 @@ GOINSTALL=$(GOCMD) install
 GOTEST=$(GOCMD) test
 GODEP=$(GOTEST) -i
 GOFMT=gofmt -w
-GOBUILDIMAGE=golang:1.11.2
+GOBUILDIMAGE=golang:1.12.5
-GOBUILDPATH=$(GOBASEPATH)/harbor
+GOBUILDPATH=/harbor
 GOIMAGEBUILDCMD=/usr/local/go/bin/go
-GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build
+GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build -mod vendor
 GOBUILDPATH_CORE=$(GOBUILDPATH)/src/core
 GOBUILDPATH_JOBSERVICE=$(GOBUILDPATH)/src/jobservice
 GOBUILDPATH_REGISTRYCTL=$(GOBUILDPATH)/src/registryctl
@@ -243,7 +242,7 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
     $(HARBORPKG)/install.sh \
     $(HARBORPKG)/harbor.yml
 
-DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
+DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
 
 ifeq ($(NOTARYFLAG), true)
     DOCKERSAVE_PARA+= goharbor/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) goharbor/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG)
@@ -271,7 +270,6 @@ check_environment:
 
 compile_core:
     @echo "compiling binary for core (golang image)..."
-    @echo $(GOBASEPATH)
     @echo $(GOBUILDPATH)
     @$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_CORE) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -o $(GOBUILDMAKEPATH_CORE)/$(CORE_BINARYNAME)
     @echo "Done."
@@ -294,7 +292,7 @@ compile_notary_migrate_patch:
 compile: check_environment versions_prepare compile_core compile_jobservice compile_registryctl compile_notary_migrate_patch
 
 update_prepare_version:
-    @echo "substitude the prepare version tag in prepare file..."
+    @echo "substitute the prepare version tag in prepare file..."
     @$(SEDCMD) -i -e 's/goharbor\/prepare:.*[[:space:]]\+/goharbor\/prepare:$(VERSIONTAG) /' $(MAKEPATH)/prepare ;
 
 prepare: update_prepare_version
@@ -414,17 +412,16 @@ pushimage:
 
 start:
     @echo "loading harbor images..."
-    @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) up -d
+    @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) up -d
     @echo "Start complete. You can visit harbor now."
 
 down:
-    @echo "Please make sure to set -e NOTARYFLAG=true/CLAIRFLAG=true/CHARTFLAG=true if you are using Notary/CLAIR/Chartmuseum in Harbor, otherwise the Notary/CLAIR/Chartmuseum containers cannot be stop automaticlly."
     @while [ -z "$$CONTINUE" ]; do \
        read -r -p "Type anything but Y or y to exit. [Y/N]: " CONTINUE; \
     done ; \
     [ $$CONTINUE = "y" ] || [ $$CONTINUE = "Y" ] || (echo "Exiting."; exit 1;)
     @echo "stoping harbor instance..."
-    @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_LIST) down -v
+    @$(DOCKERCOMPOSECMD) $(DOCKERCOMPOSE_FILE_OPT) down -v
     @echo "Done."
 
 swagger_client:
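
The net effect of removing GOBASEPATH and adding the `-mod vendor` flag is that the containerized build no longer needs a GOPATH layout. Below is a minimal sketch of what the `compile_core` target now runs, assuming the repository is checked out in the current directory; the output path is illustrative, loosely following the Makefile's GOBUILDMAKEPATH variables, not copied from it:

```sh
# Mount the source tree at /harbor (the new GOBUILDPATH) and build the core
# binary against the vendored dependencies, as the compile_core target does.
docker run --rm \
  -v "$(pwd)":/harbor \
  -w /harbor/src/core \
  golang:1.12.5 \
  /usr/local/go/bin/go build -mod vendor -o /harbor/make/dev/core/harbor_core
```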

README.md (13 changes)
@@ -25,7 +25,7 @@ Please use [releases](https://github.com/vmware/harbor/releases) instead of the
 
 <img alt="Harbor" src="docs/img/harbor_logo.png">
 
-Harbor is an an open source trusted cloud native registry project that stores, signs, and scans content. Harbor extends the open source Docker Distribution by adding the functionalities usually required by users such as security, identity and management. Having a registry closer to the build and run environment can improve the image transfer efficiency. Harbor supports replication of images between registries, and also offers advanced security features such as user management, access control and activity auditing.
+Harbor is an open source trusted cloud native registry project that stores, signs, and scans content. Harbor extends the open source Docker Distribution by adding the functionalities usually required by users such as security, identity and management. Having a registry closer to the build and run environment can improve the image transfer efficiency. Harbor supports replication of images between registries, and also offers advanced security features such as user management, access control and activity auditing.
 
 Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF). If you are an organization that wants to help shape the evolution of cloud native technologies, consider joining the CNCF. For details about who's involved and how Harbor plays a role, read the CNCF
 [announcement](https://www.cncf.io/blog/2018/07/31/cncf-to-host-harbor-in-the-sandbox/).
@@ -33,22 +33,23 @@ Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CN
 ## Features
 
 * **Cloud native registry**: With support for both container images and [Helm](https://helm.sh) charts, Harbor serves as registry for cloud native environments like container runtimes and orchestration platforms.
-* **Role based access control**: Users and repositories are organized via 'projects' and a user can have different permission for images under a project.
+* **Role based access control**: Users and repositories are organized via 'projects' and a user can have different permission for images or Helm charts under a project.
-* **Policy based image replication**: Images can be replicated (synchronized) between multiple registry instances based on policies with multiple filters (repository, tag and label). Harbor will auto-retry to replicate if it encounters any errors. Great for load balancing, high availability, multi-datacenter, hybrid and multi-cloud scenarios.
+* **Policy based replication**: Images and charts can be replicated (synchronized) between multiple registry instances based on policies with multiple filters (repository, tag and label). Harbor automatically retries a replication if it encounters any errors. Great for load balancing, high availability, multi-datacenter, hybrid and multi-cloud scenarios.
 * **Vulnerability Scanning**: Harbor scans images regularly and warns users of vulnerabilities.
 * **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor and assigning proper project roles to them.
+* **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal.
 * **Image deletion & garbage collection**: Images can be deleted and their space can be recycled.
 * **Notary**: Image authenticity can be ensured.
 * **Graphical user portal**: User can easily browse, search repositories and manage projects.
 * **Auditing**: All the operations to the repositories are tracked.
-* **RESTful API**: RESTful APIs for most administrative operations, easy to integrate with external systems.
+* **RESTful API**: RESTful APIs for most administrative operations, easy to integrate with external systems. An embedded Swagger UI is available for exploring and testing the API.
-* **Easy deployment**: Provide both an online and offline installer.
+* **Easy deployment**: Provide both an online and offline installer. In addition, a Helm Chart can be used to deploy Harbor on Kubernetes.
 
 ## Install & Run
 
 **System requirements:**
 
-**On a Linux host:** docker 17.03.0-ce+ and docker-compose 1.18.0+ .
+**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.18.0+ .
 
 Download binaries of **[Harbor release ](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](docs/installation_guide.md)** to install Harbor.
 

@@ -44,25 +44,25 @@ You can compile the code by one of the three approaches:
 * Get official Golang image from docker hub:
 
     ```sh
-    $ docker pull golang:1.11.2
+    $ docker pull golang:1.12.5
     ```
 
 * Build, install and bring up Harbor without Notary:
 
     ```sh
-    $ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage
+    $ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage
     ```
 
 * Build, install and bring up Harbor with Notary:
 
     ```sh
-    $ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true
+    $ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true
     ```
 
 * Build, install and bring up Harbor with Clair:
 
     ```sh
-    $ make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage CLAIRFLAG=true
+    $ make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage CLAIRFLAG=true
    ```
 
 #### II. Compile code with your own Golang environment, then build Harbor

@@ -113,17 +113,24 @@ Notice that you may need to trust the certificate at OS level. Please refer to t
 
 **3) Configure Harbor**
 
-Edit the file ```harbor.cfg```, update the hostname and the protocol, and update the attributes ```ssl_cert``` and ```ssl_cert_key```:
+Edit the file `harbor.yml`, update the hostname and uncomment the https block, and update the attributes `certificate` and `private_key`:
 
+```yaml
+#set hostname
+hostname: yourdomain.com
+
+http:
+  port: 80
+
+https:
+  # https port for harbor, default is 443
+  port: 443
+  # The path of cert and key files for nginx
+  certificate: /data/cert/yourdomain.com.crt
+  private_key: /data/cert/yourdomain.com.key
+
-```
-#set hostname
-hostname = yourdomain.com:port
-#set ui_url_protocol
-ui_url_protocol = https
 ......
-#The path of cert and key files for nginx, they are applied only the protocol is set to https
-ssl_cert = /data/cert/yourdomain.com.crt
-ssl_cert_key = /data/cert/yourdomain.com.key
 ```
 
 Generate configuration files for Harbor:

@@ -163,7 +170,7 @@ If you've mapped nginx 443 port to another, you need to add the port to login, l
 ```
 
 
-##Troubleshooting
+## Troubleshooting
 1. You may get an intermediate certificate from a certificate issuer. In this case, you should merge the intermediate certificate with your own certificate to create a certificate bundle. You can achieve this by the below command:
 
 ```
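
For completeness, a self-signed certificate and key matching the `certificate` and `private_key` paths in the new `harbor.yml` snippet can be generated with openssl. This is a minimal sketch; `yourdomain.com` is the same placeholder the guide uses, so substitute your real hostname:

```sh
# Generate a self-signed certificate and key at the paths harbor.yml points to.
mkdir -p /data/cert
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -subj "/CN=yourdomain.com" \
  -keyout /data/cert/yourdomain.com.key \
  -out /data/cert/yourdomain.com.crt
```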

(Binary image file changed, not shown. Before: 4.5 KiB.)

@@ -30,7 +30,7 @@ Harbor is deployed as several Docker containers, and, therefore, can be deployed
 
 |Software|Version|Description|
 |---|---|---|
-|Docker engine|version 17.03.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)|
+|Docker engine|version 17.06.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)|
 |Docker Compose|version 1.18.0 or higher|For installation instructions, please refer to: [docker compose doc](https://docs.docker.com/compose/install/)|
 |Openssl|latest is preferred|Generate certificate and keys for Harbor|
 
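
A quick way to check an existing host against the requirements table above, using only the stock CLI tools:

```sh
# Print the installed versions; compare against 17.06.0-ce+ and 1.18.0+.
docker version --format 'docker engine: {{.Server.Version}}'
docker-compose --version
openssl version
```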

@@ -17,18 +17,23 @@ This guide provides instructions to manage roles by LDAP/AD group. You can impor
 
 Besides **[basic LDAP configure parameters](https://github.com/vmware/harbor/blob/master/docs/installation_guide.md#optional-parameters)** , LDAP group related configure parameters should be configured, they can be configured before or after installation
 
-1. Configure parameters in harbor.cfg before installation
+1. Configure LDAP parameters via API, refer to **[Config Harbor user settings by command line](configure_user_settings.md)**
 
+   For example:
+   ```
+   curl -X PUT -u "<username>:<password>" -H "Content-Type: application/json" -ki https://harbor.sample.domain/api/configurations -d'{"ldap_group_basedn":"ou=groups,dc=example,dc=com"}'
+   ```
+   The following parameters are related to LDAP group configuration.
 * ldap_group_basedn -- The base DN from which to lookup a group in LDAP/AD, for example: ou=groups,dc=example,dc=com
 * ldap_group_filter -- The filter to search LDAP/AD group, for example: objectclass=groupOfNames
 * ldap_group_gid -- The attribute used to name an LDAP/AD group, for example: cn
 * ldap_group_scope -- The scope to search for LDAP/AD groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
 
-2. Or Change configure parameter in web console after installation. Go to "Administration" -> "Configuration" -> "Authentication" and change following settings.
+2. Or change configure parameter in web console after installation. Go to "Administration" -> "Configuration" -> "Authentication" and change following settings.
-    - LDAP Group Base DN -- ldap_group_basedn in harbor.cfg
+    - LDAP Group Base DN -- ldap_group_basedn in the Harbor user settings
-    - LDAP Group Filter -- ldap_group_filter in harbor.cfg
+    - LDAP Group Filter -- ldap_group_filter in the Harbor user settings
-    - LDAP Group GID -- ldap_group_gid in harbor.cfg
+    - LDAP Group GID -- ldap_group_gid in the Harbor user settings
-    - LDAP Group Scope -- ldap_group_scope in harbor.cfg
+    - LDAP Group Scope -- ldap_group_scope in the Harbor user settings
     - LDAP Groups With Admin Privilege -- Specify an LDAP/AD group DN, all LDAPA/AD users in this group have harbor admin privileges.
 
 ![Screenshot of LDAP group config](img/group/ldap_group_config.png)

@@ -49,4 +54,4 @@ If a user is in the LDAP groups with admin privilege (ldap_group_admin_dn), the
 
 ## User privileges and group privileges
 
-If a user has both user-level role and group-level role, only the user level role privileges will be considered.
+If a user has both user-level role and group-level role, these privileges are merged together.
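
Since the LDAP group parameters now live in the Harbor user settings rather than harbor.cfg, the same `/api/configurations` endpoint used in the curl example above can also be read back to verify them. A sketch, reusing the guide's placeholder host and credentials:

```sh
# Fetch the current configuration and show only the LDAP group keys.
curl -s -k -u "<username>:<password>" \
  https://harbor.sample.domain/api/configurations \
  | tr ',' '\n' | grep ldap_group_
```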

@@ -1,6 +1,6 @@
 # Harbor upgrade and migration guide
 
-This guide only covers upgrade and mgiration to version >= v1.8.0
+This guide only covers upgrade and migration to version >= v1.8.0
 
 When upgrading your existing Harbor instance to a newer version, you may need to migrate the data in your database and the settings in `harbor.cfg`.
 Since the migration may alter the database schema and the settings of `harbor.cfg`, you should **always** back up your data before any migration.

@@ -34,7 +34,7 @@ you follow the steps below.
 ```
 mv harbor /my_backup_dir/harbor
 ```
-Back up database (by default in diretory `/data/database`)
+Back up database (by default in directory `/data/database`)
 ```
 cp -r /data/database /my_backup_dir/
 ```

@@ -2,7 +2,7 @@ swagger: '2.0'
 info:
   title: Harbor API
   description: These APIs provide services for manipulating Harbor project.
-  version: 1.7.0
+  version: 1.9.0
 host: localhost
 schemes:
   - http

@@ -311,6 +311,34 @@ paths:
          description: User need to log in first.
        '500':
          description: Unexpected internal errors.
+  '/projects/{project_id}/summary':
+    get:
+      summary: Get summary of the project.
+      description: Get summary of the project.
+      parameters:
+        - name: project_id
+          in: path
+          type: integer
+          format: int64
+          required: true
+          description: Relevant project ID
+      tags:
+        - Products
+      responses:
+        '200':
+          description: Get summary of the project successfully.
+          schema:
+            $ref: '#/definitions/ProjectSummary'
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '404':
+          description: Project ID does not exist.
+        '403':
+          description: User does not have permission to get summary of the project.
+        '500':
+          description: Unexpected internal errors.
   '/projects/{project_id}/metadatas':
     get:
       summary: Get project metadata.
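
The new summary endpoint is a plain authenticated GET. A sketch against a hypothetical project with ID 1, reusing the placeholder host from the docs above; the response body follows the `ProjectSummary` definition added later in this file:

```sh
# Retrieve repo/chart/member counts and quota usage for project 1.
curl -s -k -u "<username>:<password>" \
  https://harbor.sample.domain/api/projects/1/summary
```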

@@ -516,7 +544,7 @@ paths:
        '403':
          description: User in session does not have permission to the project.
        '409':
-          description: An LDAP user group with same DN already exist.
+          description: A user group with same group name already exist or an LDAP user group with same DN already exist.
        '500':
          description: Unexpected internal errors.
   '/projects/{project_id}/members/{mid}':

@@ -1235,11 +1263,16 @@ paths:
          type: string
          required: true
          description: Relevant repository name.
-        - name: label_ids
+        - name: label_id
          in: query
          type: string
          required: false
-          description: A list of comma separated label IDs.
+          description: A label ID.
+        - name: detail
+          in: query
+          type: boolean
+          required: false
+          description: Bool value indicating whether return detailed information of the tag, such as vulnerability scan info, if set to false, only tag name is returned.
      tags:
        - Products
      responses:
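
The renamed `label_id` parameter and the new `detail` flag combine on the tag-listing endpoint. A sketch, assuming a repository named library/nginx and a label with ID 3 (both placeholders):

```sh
# List only tag names (detail=false) for tags carrying label 3.
curl -s -k -u "<username>:<password>" \
  "https://harbor.sample.domain/api/repositories/library/nginx/tags?detail=false&label_id=3"
```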

@@ -2376,6 +2409,20 @@ paths:
          $ref: '#/responses/UnsupportedMediaType'
        '500':
          description: Unexpected internal errors.
+  /internal/syncquota:
+    post:
+      summary: Sync quota from registry/chart to DB.
+      description: |
+        This endpoint is for syncing quota usage of registry/chart with database.
+      tags:
+        - Products
+      responses:
+        '200':
+          description: Sync repositories successfully.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User does not have permission of system admin role.
   /systeminfo:
     get:
       summary: Get general system info

@@ -2575,7 +2622,7 @@ paths:
        '403':
          description: User in session does not have permission to the user group.
        '409':
-          description: An LDAP user group with same DN already exist.
+          description: A user group with same group name already exist, or an LDAP user group with same DN already exist.
        '500':
          description: Unexpected internal errors.
   '/usergroups/{group_id}':

@@ -3031,7 +3078,9 @@ paths:
          description: The chart name
      responses:
        '200':
-          $ref: '#/definitions/ChartVersions'
+          description: Retrieved all versions of the specified chart
+          schema:
+            $ref: '#/definitions/ChartVersions'
        '401':
          $ref: '#/definitions/UnauthorizedChartAPIError'
        '403':

@@ -3091,7 +3140,9 @@ paths:
          description: The chart version
      responses:
        '200':
-          $ref: '#/definitions/ChartVersionDetails'
+          description: Successfully retrieved the chart version
+          schema:
+            $ref: '#/definitions/ChartVersionDetails'
        '401':
          $ref: '#/definitions/UnauthorizedChartAPIError'
        '403':

@@ -3474,6 +3525,441 @@ paths:
          description: The robot account is not found.
        '500':
          description: Unexpected internal errors.
+  '/system/oidc/ping':
+    post:
+      summary: Test the OIDC endpoint.
+      description: Test the OIDC endpoint, the setting of the endpoint is provided in the request. This API can only
+        be called by system admin.
+      tags:
+        - Products
+        - System
+      parameters:
+        - name: endpoint
+          in: body
+          description: Request body for OIDC endpoint to be tested.
+          required: true
+          schema:
+            type: object
+            properties:
+              url:
+                type: string
+                description: The URL of OIDC endpoint to be tested.
+              verify_cert:
+                type: boolean
+                description: Whether the certificate should be verified
+      responses:
+        '200':
+          description: Ping succeeded. The OIDC endpoint is valid.
+        '400':
+          description: The ping failed
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User does not have permission to call this API
+  '/system/CVEWhitelist':
+    get:
+      summary: Get the system level whitelist of CVE.
+      description: Get the system level whitelist of CVE. This API can be called by all authenticated users.
+      tags:
+        - Products
+        - System
+      responses:
+        '200':
+          description: Successfully retrieved the CVE whitelist.
+          schema:
+            $ref: "#/definitions/CVEWhitelist"
+        '401':
+          description: User is not authenticated.
+        '500':
+          description: Unexpected internal errors.
+    put:
+      summary: Update the system level whitelist of CVE.
+      description: This API overwrites the system level whitelist of CVE with the list in request body. Only system Admin
+        has permission to call this API.
+      tags:
+        - Products
+        - System
+      parameters:
+        - in: body
+          name: whitelist
+          description: The whitelist with new content
+          schema:
+            $ref: "#/definitions/CVEWhitelist"
+      responses:
+        '200':
+          description: Successfully updated the CVE whitelist.
+        '401':
+          description: User is not authenticated.
+        '403':
+          description: User does not have permission to call this API.
+        '500':
+          description: Unexpected internal errors.
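
Together, the GET and PUT verbs on `/system/CVEWhitelist` give a read-modify-write workflow. A sketch using the `CVEWhitelistItem` shape defined later in this file; host, credentials and the expiry timestamp are placeholders:

```sh
# Read the current system-level CVE whitelist.
curl -s -k -u "admin:<password>" \
  https://harbor.sample.domain/api/system/CVEWhitelist

# Overwrite it with a single entry that expires at the given epoch second.
curl -s -k -u "admin:<password>" -X PUT \
  -H "Content-Type: application/json" \
  https://harbor.sample.domain/api/system/CVEWhitelist \
  -d '{"expires_at": 1893456000, "items": [{"cve_id": "CVE-2019-10164"}]}'
```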

+  '/quotas':
+    get:
+      summary: List quotas
+      description: List quotas
+      tags:
+        - quota
+      parameters:
+        - name: reference
+          in: query
+          description: The reference type of quota.
+          required: false
+          type: string
+        - name: sort
+          in: query
+          type: string
+          required: false
+          description: |
+            Sort method, valid values include:
+            'hard.resource_name', '-hard.resource_name', 'used.resource_name', '-used.resource_name'.
+            Here '-' stands for descending order, resource_name should be the real resource name of the quota.
+        - name: page
+          in: query
+          type: integer
+          format: int32
+          required: false
+          description: 'The page nubmer, default is 1.'
+        - name: page_size
+          in: query
+          type: integer
+          format: int32
+          required: false
+          description: 'The size of per page, default is 10, maximum is 100.'
+      responses:
+        '200':
+          description: Successfully retrieved the quotas.
+          schema:
+            type: array
+            items:
+              $ref: '#/definitions/Quota'
+          headers:
+            X-Total-Count:
+              description: The total count of access logs
+              type: integer
+            Link:
+              description: Link refers to the previous page and next page
+              type: string
+        '401':
+          description: User is not authenticated.
+        '403':
+          description: User does not have permission to call this API.
+        '500':
+          description: Unexpected internal errors.
+  '/quotas/{id}':
+    get:
+      summary: Get the specified quota
+      description: Get the specified quota
+      tags:
+        - quota
+      parameters:
+        - name: id
+          in: path
+          type: integer
+          required: true
+          description: Quota ID
+      responses:
+        '200':
+          description: Successfully retrieved the quota.
+          schema:
+            $ref: '#/definitions/Quota'
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User does not have permission to call this API
+        '404':
+          description: Quota does not exist.
+        '500':
+          description: Unexpected internal errors.
+    put:
+      summary: Update the specified quota
+      description: Update hard limits of the specified quota
+      tags:
+        - quota
+      parameters:
+        - name: id
+          in: path
+          type: integer
+          required: true
+          description: Quota ID
+        - name: hard
+          in: body
+          required: true
+          description: The new hard limits for the quota
+          schema:
+            $ref: '#/definitions/QuotaUpdateReq'
+      responses:
+        '200':
+          description: Updated quota hard limits successfully.
+        '400':
+          description: Illegal format of quota update request.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User does not have permission to the quota.
+        '404':
+          description: Quota ID does not exist.
+        '500':
+          description: Unexpected internal errors.
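
The listing parameters above translate directly into a query string. A sketch that pages through project quotas sorted by descending usage, assuming a resource actually named `storage` (the spec only says `resource_name` must match a real resource of the quota):

```sh
# List project quotas, largest storage users first, first page of 10.
curl -s -k -u "admin:<password>" \
  "https://harbor.sample.domain/api/quotas?reference=project&sort=-used.storage&page=1&page_size=10"
```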

+  '/projects/{project_id}/webhook/policies':
+    get:
+      summary: List project webhook policies.
+      description: |
+        This endpoint returns webhook policies of a project.
+      parameters:
+        - name: project_id
+          in: path
+          type: integer
+          format: int64
+          required: true
+          description: Relevant project ID.
+      tags:
+        - Products
+      responses:
+        '200':
+          description: List project webhook policies successfully.
+          schema:
+            type: array
+            items:
+              $ref: '#/definitions/WebhookPolicy'
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to list webhook policies of the project.
+        '500':
+          description: Unexpected internal errors.
+    post:
+      summary: Create project webhook policy.
+      description: |
+        This endpoint create a webhook policy if the project does not have one.
+      parameters:
+        - name: project_id
+          in: path
+          type: integer
+          format: int64
+          required: true
+          description: Relevant project ID
+        - name: policy
+          in: body
+          description: Properties "targets" and "event_types" needed.
+          required: true
+          schema:
+            $ref: '#/definitions/WebhookPolicy'
+      tags:
+        - Products
+      responses:
+        '201':
+          description: Project webhook policy create successfully.
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to create webhook policy of the project.
+        '500':
+          description: Unexpected internal errors.
+  '/projects/{project_id}/webhook/policies/{policy_id}':
+    get:
+      summary: Get project webhook policy
+      description: |
+        This endpoint returns specified webhook policy of a project.
+      parameters:
+        - name: project_id
+          in: path
+          description: Relevant project ID.
+          required: true
+          type: integer
+          format: int64
+        - name: policy_id
+          in: path
+          description: The id of webhook policy.
+          required: true
+          type: integer
+          format: int64
+      tags:
+        - Products
+      responses:
+        '200':
+          description: Get webhook policy successfully.
+          schema:
+            $ref: '#/definitions/WebhookPolicy'
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to get webhook policy of the project.
+        '404':
+          description: Webhook policy ID does not exist.
+        '500':
+          description: Internal server errors.
+    put:
+      summary: Update webhook policy of a project.
+      description: |
+        This endpoint is aimed to update the webhook policy of a project.
+      parameters:
+        - name: project_id
+          in: path
+          description: Relevant project ID.
+          required: true
+          type: integer
+          format: int64
+        - name: policy_id
+          in: path
+          description: The id of webhook policy.
+          required: true
+          type: integer
+          format: int64
+        - name: policy
+          in: body
+          description: All properties needed except "id", "project_id", "creation_time", "update_time".
+          required: true
+          schema:
+            $ref: '#/definitions/WebhookPolicy'
+      tags:
+        - Products
+      responses:
+        '200':
+          description: Update webhook policy successfully.
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to update webhook policy of the project.
+        '404':
+          description: Webhook policy ID does not exist.
+        '500':
+          description: Internal server errors.
+    delete:
+      summary: Delete webhook policy of a project
+      description: |
+        This endpoint is aimed to delete webhookpolicy of a project.
+      parameters:
+        - name: project_id
+          in: path
+          description: Relevant project ID.
+          required: true
+          type: integer
+          format: int64
+        - name: policy_id
+          in: path
+          description: The id of webhook policy.
+          required: true
+          type: integer
+          format: int64
+      tags:
+        - Products
+      responses:
+        '200':
+          description: Delete webhook policy successfully.
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to delete webhook policy of the project.
+        '404':
+          description: Webhook policy ID does not exist.
+        '500':
+          description: Internal server errors.
+  '/projects/{project_id}/webhook/policies/test':
+    post:
+      summary: Test project webhook connection
+      description: |
+        This endpoint tests webhook connection of a project.
+      parameters:
+        - name: project_id
+          in: path
+          description: Relevant project ID.
+          required: true
+          type: integer
+          format: int64
+        - name: policy
+          in: body
+          description: Only property "targets" needed.
+          required: true
+          schema:
+            $ref: '#/definitions/WebhookPolicy'
+      tags:
+        - Products
+      responses:
+        '200':
+          description: Test webhook connection successfully.
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to get webhook policy of the project.
+        '500':
+          description: Internal server errors.
+  '/projects/{project_id}/webhook/lasttrigger':
+    get:
+      summary: Get project webhook policy last trigger info
+      description: |
+        This endpoint returns last trigger information of project webhook policy.
+      parameters:
+        - name: project_id
+          in: path
+          description: Relevant project ID.
+          required: true
+          type: integer
+          format: int64
+      tags:
+        - Products
+      responses:
+        '200':
+          description: Test webhook connection successfully.
+          schema:
+            type: array
+            items:
+              $ref: '#/definitions/WebhookLastTrigger'
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to get webhook policy of the project.
+        '500':
+          description: Internal server errors.
+  '/projects/{project_id}/webhook/jobs':
+    get:
+      summary: List project webhook jobs
+      description: |
+        This endpoint returns webhook jobs of a project.
+      parameters:
+        - name: project_id
+          in: path
+          type: integer
+          format: int64
+          required: true
+          description: Relevant project ID.
+        - name: policy_id
+          in: query
+          type: integer
+          format: int64
+          required: true
+          description: The policy ID.
+      tags:
+        - Products
+      responses:
+        '200':
+          description: List project webhook jobs successfully.
+          schema:
+            type: array
+            items:
+              $ref: '#/definitions/WebhookJob'
+        '400':
+          description: Illegal format of provided ID value.
+        '401':
+          description: User need to log in first.
+        '403':
+          description: User have no permission to list webhook jobs of the project.
+        '500':
+          description: Unexpected internal errors.
 responses:
   OK:
     description: 'Success'
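
As a usage sketch for the webhook endpoints above: create a policy on a project, then exercise the connection test. The target address, event type name and project ID are illustrative, not taken from this spec:

```sh
# Create a webhook policy on project 1 that posts image-push events.
curl -s -k -u "admin:<password>" -X POST \
  -H "Content-Type: application/json" \
  https://harbor.sample.domain/api/projects/1/webhook/policies \
  -d '{"targets": [{"type": "http", "address": "https://hooks.example.com/harbor"}], "event_types": ["pushImage"], "enabled": true}'

# Dry-run the delivery against the same target.
curl -s -k -u "admin:<password>" -X POST \
  -H "Content-Type: application/json" \
  https://harbor.sample.domain/api/projects/1/webhook/policies/test \
  -d '{"targets": [{"type": "http", "address": "https://hooks.example.com/harbor"}]}'
```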

@@ -3556,6 +4042,17 @@ definitions:
     metadata:
       description: The metadata of the project.
       $ref: '#/definitions/ProjectMetadata'
+    cve_whitelist:
+      description: The CVE whitelist of the project.
+      $ref: '#/definitions/CVEWhitelist'
+    count_limit:
+      type: integer
+      format: int64
+      description: The count quota of the project.
+    storage_limit:
+      type: integer
+      format: int64
+      description: The storage quota of the project.
   Project:
     type: object
     properties:

@@ -3597,6 +4094,9 @@ definitions:
     metadata:
       description: The metadata of the project.
       $ref: '#/definitions/ProjectMetadata'
+    cve_whitelist:
+      description: The CVE whitelist of this project.
+      $ref: '#/definitions/CVEWhitelist'
   ProjectMetadata:
     type: object
     properties:

@@ -3605,16 +4105,50 @@ definitions:
       description: 'The public status of the project. The valid values are "true", "false".'
     enable_content_trust:
       type: string
-      description: 'Whether content trust is enabled or not. If it is enabled, user cann''t pull unsigned images from this project. The valid values are "true", "false".'
+      description: 'Whether content trust is enabled or not. If it is enabled, user can''t pull unsigned images from this project. The valid values are "true", "false".'
     prevent_vul:
       type: string
       description: 'Whether prevent the vulnerable images from running. The valid values are "true", "false".'
     severity:
       type: string
-      description: 'If the vulnerability is high than severity defined here, the images cann''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
+      description: 'If the vulnerability is high than severity defined here, the images can''t be pulled. The valid values are "negligible", "low", "medium", "high", "critical".'
     auto_scan:
       type: string
       description: 'Whether scan images automatically when pushing. The valid values are "true", "false".'
+    reuse_sys_cve_whitelist:
+      type: string
+      description: 'Whether this project reuse the system level CVE whitelist as the whitelist of its own. The valid values are "true", "false".
+        If it is set to "true" the actual whitelist associate with this project, if any, will be ignored.'
+  ProjectSummary:
+    type: object
+    properties:
+      repo_count:
+        type: integer
+        description: The number of the repositories under this project.
+      chart_count:
+        type: integer
+        description: The total number of charts under this project.
+      project_admin_count:
+        type: integer
+        description: The total number of project admin members.
+      master_count:
+        type: integer
+        description: The total number of master members.
+      developer_count:
+        type: integer
+        description: The total number of developer members.
+      guest_count:
+        type: integer
+        description: The total number of guest members.
+      quota:
+        type: object
+        properties:
+          hard:
+            $ref: "#/definitions/ResourceList"
+            description: The hard limits of the quota
+          used:
+            $ref: "#/definitions/ResourceList"
+            description: The used status of the quota
   Manifest:
     type: object
     properties:

@@ -4270,6 +4804,9 @@ definitions:
     auth_mode:
       type: string
       description: 'The auth mode of current system, such as "db_auth", "ldap_auth"'
+    count_per_project:
+      type: string
+      description: The default count quota for the new created projects.
     email_from:
       type: string
       description: The sender name for Email notification.

@@ -4330,12 +4867,18 @@ definitions:
     project_creation_restriction:
       type: string
       description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
+    quota_per_project_enable:
+      type: boolean
+      description: This attribute indicates whether quota per project enabled in harbor
     read_only:
       type: boolean
       description: '''docker push'' is prohibited by Harbor if you set it to true. '
     self_registration:
       type: boolean
       description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.'
+    storage_per_project:
+      type: string
+      description: The default storage quota for the new created projects.
     token_expiration:
       type: integer
       description: 'The expiration time of the token for internal Registry, in minutes.'

@@ -4361,6 +4904,9 @@ definitions:
     auth_mode:
       $ref: '#/definitions/StringConfigItem'
       description: 'The auth mode of current system, such as "db_auth", "ldap_auth"'
+    count_per_project:
+      $ref: '#/definitions/IntegerConfigItem'
+      description: The default count quota for the new created projects.
     email_from:
       $ref: '#/definitions/StringConfigItem'
       description: The sender name for Email notification.

@@ -4421,12 +4967,18 @@ definitions:
     project_creation_restriction:
       $ref: '#/definitions/StringConfigItem'
       description: This attribute restricts what users have the permission to create project. It can be "everyone" or "adminonly".
+    quota_per_project_enable:
+      $ref: '#/definitions/BoolConfigItem'
+      description: This attribute indicates whether quota per project enabled in harbor
     read_only:
       $ref: '#/definitions/BoolConfigItem'
       description: '''docker push'' is prohibited by Harbor if you set it to true. '
     self_registration:
       $ref: '#/definitions/BoolConfigItem'
       description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.'
+    storage_per_project:
+      $ref: '#/definitions/IntegerConfigItem'
+      description: The default storage quota for the new created projects.
     token_expiration:
       $ref: '#/definitions/IntegerConfigItem'
       description: 'The expiration time of the token for internal Registry, in minutes.'

@@ -4542,7 +5094,7 @@ definitions:
       description: The name of the user group
     group_type:
       type: integer
-      description: 'The group type, 1 for LDAP group.'
+      description: 'The group type, 1 for LDAP group, 2 for HTTP group.'
     ldap_group_dn:
       type: string
       description: The DN of the LDAP group if group type is 1 (LDAP group).

@@ -4829,7 +5381,9 @@ definitions:
     properties:
       type:
         type: string
-        description: The schedule type. The valid values are hourly, daily, weekly, custom and None. 'None' means to cancel the schedule.
+        description: |
+          The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manually' and 'None'.
+          'Manually' means to trigger it right away and 'None' means to cancel the schedule.
       cron:
         type: string
         description: A cron expression, a time-based job scheduler.
@ -5065,4 +5619,162 @@ definitions:
|
|||||||
description: The name of namespace
|
description: The name of namespace
|
||||||
metadata:
|
metadata:
|
||||||
type: object
|
type: object
|
||||||
description: The metadata of namespace
|
description: The metadata of namespace
|
||||||
|
CVEWhitelist:
|
||||||
|
type: object
|
||||||
|
description: The CVE Whitelist for system or project
|
||||||
|
properties:
|
||||||
|
id:
|
||||||
|
type: integer
|
||||||
|
description: ID of the whitelist
|
||||||
|
project_id:
|
||||||
|
type: integer
|
||||||
|
description: ID of the project which the whitelist belongs to. For system level whitelist this attribute is zero.
|
||||||
|
expires_at:
|
||||||
|
type: integer
|
||||||
|
description: the time for expiration of the whitelist, in the form of seconds since epoch. This is an optional attribute, if it's not set the CVE whitelist does not expire.
|
||||||
|
items:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: "#/definitions/CVEWhitelistItem"
|
||||||
|
CVEWhitelistItem:
|
||||||
|
type: object
|
||||||
|
description: The item in CVE whitelist
|
||||||
|
properties:
|
||||||
|
cve_id:
|
||||||
|
type: string
|
||||||
|
description: The ID of the CVE, such as "CVE-2019-10164"
|
||||||
|
ResourceList:
|
||||||
|
type: object
|
||||||
|
additionalProperties:
|
||||||
|
type: integer
|
||||||
|
QuotaUpdateReq:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
hard:
|
||||||
|
$ref: "#/definitions/ResourceList"
|
||||||
|
description: The new hard limits for the quota
|
||||||
|
QuotaRefObject:
|
||||||
|
type: object
|
||||||
|
additionalProperties: {}
|
||||||
|
Quota:
|
||||||
|
type: object
|
||||||
|
description: The quota object
|
||||||
|
properties:
|
||||||
|
id:
|
||||||
|
type: integer
|
||||||
|
description: ID of the quota
|
||||||
|
ref:
|
||||||
|
$ref: "#/definitions/QuotaRefObject"
|
||||||
|
description: The reference object of the quota
|
||||||
|
hard:
|
||||||
|
$ref: "#/definitions/ResourceList"
|
||||||
|
description: The hard limits of the quota
|
||||||
|
used:
|
||||||
|
$ref: "#/definitions/ResourceList"
|
||||||
|
description: The used status of the quota
|
||||||
|
creation_time:
|
||||||
|
type: string
|
||||||
|
description: the creation time of the quota
|
||||||
|
update_time:
|
||||||
|
type: string
|
||||||
|
description: the update time of the quota
|
||||||
|
WebhookTargetObject:
|
||||||
|
type: object
|
||||||
|
description: The webhook policy target object.
|
||||||
|
properties:
|
||||||
|
type:
|
||||||
|
type: string
|
||||||
|
description: The webhook target notify type.
|
||||||
|
address:
|
||||||
|
type: string
|
||||||
|
description: The webhook target address.
|
||||||
|
auth_header:
|
||||||
|
type: string
|
||||||
|
description: The webhook auth header.
|
||||||
|
skip_cert_verify:
|
||||||
|
type: boolean
|
||||||
|
description: Whether or not to skip cert verify.
|
||||||
|
WebhookPolicy:
|
||||||
|
type: object
|
||||||
|
description: The webhook policy object
|
||||||
|
properties:
|
||||||
|
id:
|
||||||
|
type: integer
|
||||||
|
format: int64
|
||||||
|
description: The webhook policy ID.
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
description: The name of webhook policy.
|
||||||
|
description:
|
||||||
|
type: string
|
||||||
|
description: The description of webhook policy.
|
||||||
|
project_id:
|
||||||
|
type: integer
|
||||||
|
description: The project ID of webhook policy.
|
||||||
|
targets:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/definitions/WebhookTargetObject'
|
||||||
|
event_types:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
creator:
|
||||||
|
type: string
|
||||||
|
description: The creator of the webhook policy.
|
||||||
|
creation_time:
|
||||||
|
type: string
|
||||||
|
description: The create time of the webhook policy.
|
||||||
|
update_time:
|
||||||
|
type: string
|
||||||
|
description: The update time of the webhook policy.
|
||||||
|
enabled:
|
||||||
|
type: boolean
|
||||||
|
description: Whether the webhook policy is enabled or not.
|
||||||
|
WebhookLastTrigger:
|
||||||
|
type: object
|
||||||
|
description: The webhook policy and last trigger time group by event type.
|
||||||
|
properties:
|
||||||
|
event_type:
|
||||||
|
type: string
|
||||||
|
description: The webhook event type.
|
||||||
|
enabled:
|
||||||
|
type: boolean
|
||||||
|
description: Whether or not the webhook policy enabled.
|
||||||
|
creation_time:
|
||||||
|
type: string
|
||||||
|
description: The creation time of webhook policy.
|
||||||
|
last_trigger_time:
|
||||||
|
type: string
|
||||||
|
description: The last trigger time of webhook policy.
|
||||||
|
WebhookJob:
|
||||||
|
type: object
|
||||||
|
description: The webhook job.
|
||||||
|
properties:
|
||||||
|
id:
|
||||||
|
type: integer
|
||||||
|
format: int64
|
||||||
|
description: The webhook job ID.
|
||||||
|
policy_id:
|
||||||
|
type: integer
|
||||||
|
format: int64
|
||||||
|
description: The webhook policy ID.
|
||||||
|
event_type:
|
||||||
|
type: string
|
||||||
|
description: The webhook job event type.
|
||||||
|
notify_type:
|
||||||
|
type: string
|
||||||
|
description: The webhook job notify type.
|
||||||
|
status:
|
||||||
|
type: string
|
||||||
|
description: The webhook job status.
|
||||||
|
job_detail:
|
||||||
|
type: string
|
||||||
|
description: The webhook job notify detailed data.
|
||||||
|
creation_time:
|
||||||
|
type: string
|
||||||
|
description: The webhook job creation time.
|
||||||
|
update_time:
|
||||||
|
type: string
|
||||||
|
description: The webhook job update time.
|
||||||
|
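The new `Quota`, `ResourceList` and `QuotaUpdateReq` definitions above describe the payloads for the quota API that ships with this change. A minimal sketch of updating a quota's hard limits from Python follows; the `/api/quotas/{id}` path, host and credentials are assumptions for illustration, not taken from this diff, so verify them against your Harbor version:

```python
# Hedged sketch: set new hard limits through the quota API modelled above.
# A ResourceList is a plain object mapping resource name -> integer.
import requests

payload = {"hard": {"count": 100, "storage": 10 * 1024 ** 3}}  # 100 artifacts, 10 GiB
resp = requests.put(
    "https://harbor.example.com/api/quotas/1",  # assumed endpoint and quota ID
    json=payload,
    auth=("admin", "Harbor12345"),              # assumed credentials
)
resp.raise_for_status()
```

Per the values seeded in the accompanying migration, a hard limit of -1 reads as unlimited.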
@@ -36,10 +36,10 @@ version | set harbor version
 #### EXAMPLE:
 
 #### Build and run harbor from source code.
-make install GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true
+make install GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true
 
 ### Package offline installer
-make package_offline GOBUILDIMAGE=golang:1.11.2 COMPILETAG=compile_golangimage NOTARYFLAG=true
+make package_offline GOBUILDIMAGE=golang:1.12.5 COMPILETAG=compile_golangimage NOTARYFLAG=true
 
 ### Start harbor with notary
 make -e NOTARYFLAG=true start

@@ -573,7 +573,7 @@ Before working, Harbor should be added into the repository list with `helm repo
 
 With this mode Helm can be made aware of all the charts located in different projects and which are accessible by the currently authenticated user.
 ```
-helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo
+helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo
 ```
 **NOTES:** Providing both ca file and cert files is caused by an issue from helm.
 
@@ -581,7 +581,7 @@ helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --us
 
 With this mode, helm can only pull charts in the specified project.
 ```
-helm repo add --ca-file ca.crt --cert-file server.crt --key-file server.key --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject
+helm repo add --ca-file ca.crt --username=admin --password=Passw0rd myrepo https://xx.xx.xx.xx/chartrepo/myproject
 ```
 
 #### Push charts to the repository server by CLI
@@ -591,7 +591,7 @@ helm plugin install https://github.com/chartmuseum/helm-push
 ```
 After a successful installation, run `push` command to upload your charts:
 ```
-helm push --ca-file=ca.crt --key-file=server.key --cert-file=server.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo
+helm push --ca-file=ca.crt --username=admin --password=passw0rd chart_repo/hello-helm-0.1.0.tgz myrepo
 ```
 **NOTES:** `push` command does not support pushing a prov file of a signed chart yet.
 
@@ -609,7 +609,7 @@ helm search hello
 ```
 Everything is ready, install the chart to your kubernetes:
 ```
-helm install --ca-file=ca.crt --key-file=server.key --cert-file=server.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm
+helm install --ca-file=ca.crt --username=admin --password=Passw0rd --version 0.1.10 repo248/chart_repo/hello-helm
 ```
 
 For other more helm commands like how to sign a chart, please refer to the [helm doc](https://docs.helm.sh/helm/#helm).

@@ -30,6 +30,11 @@ harbor_admin_password: Harbor12345
 database:
   # The password for the root user of Harbor DB. Change this before any production use.
   password: root123
+  # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
+  max_idle_conns: 50
+  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
+  # Note: the default number of connections is 100 for postgres.
+  max_open_conns: 100
 
 # The default data volume
 data_volume: /data
@@ -50,20 +55,18 @@ data_volume: /data
 #   disabled: false
 
 # Clair configuration
 clair:
   # The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
   updaters_interval: 12
 
-  # Config http proxy for Clair, e.g. http://my.proxy.com:3128
-  # Clair doesn't need to connect to harbor internal components via http proxy.
-  http_proxy:
-  https_proxy:
-  no_proxy: 127.0.0.1,localhost,core,registry
-
 jobservice:
   # Maximum number of job workers in job service
   max_job_workers: 10
 
+notification:
+  # Maximum retry count for webhook job
+  webhook_job_max_retry: 10
+
 chart:
   # Change the value of absolute_url to enabled can enable absolute url in chart
   absolute_url: disabled
@@ -72,14 +75,25 @@ chart:
 log:
   # options are debug, info, warning, error, fatal
   level: info
-  # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
-  rotate_count: 50
-  # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
-  # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
-  # are all valid.
-  rotate_size: 200M
-  # The directory on your host that store log
-  location: /var/log/harbor
+  # configs for logs in local storage
+  local:
+    # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
+    rotate_count: 50
+    # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
+    # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
+    # are all valid.
+    rotate_size: 200M
+    # The directory on your host that store log
+    location: /var/log/harbor
+
+  # Uncomment following lines to enable external syslog endpoint.
+  # external_endpoint:
+  #   # protocol used to transmit log to external endpoint, options is tcp or udp
+  #   protocol: tcp
+  #   # The host of external endpoint
+  #   host: localhost
+  #   # Port of external endpoint
+  #   port: 5140
 
 #This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
 _version: 1.8.0
@@ -128,3 +142,20 @@ _version: 1.8.0
 # Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
 # uaa:
 #   ca_file: /path/to/ca
+
+# Global proxy
+# Config http proxy for components, e.g. http://my.proxy.com:3128
+# Components doesn't need to connect to each others via http proxy.
+# Remove component from `components` array if want disable proxy
+# for it. If you want use proxy for replication, MUST enable proxy
+# for core and jobservice, and set `http_proxy` and `https_proxy`.
+# Add domain to the `no_proxy` field, when you want disable proxy
+# for some special registry.
+proxy:
+  http_proxy:
+  https_proxy:
+  no_proxy: 127.0.0.1,localhost,.local,.internal,log,db,redis,nginx,core,portal,postgresql,jobservice,registry,registryctl,clair
+  components:
+    - core
+    - jobservice
+    - clair
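The log settings above move under a nested `local:` key and the database section gains optional pool settings, so anything that reads harbor.yml directly has to follow the new shape. A minimal sketch with PyYAML, assuming the file sits at ./harbor.yml:

```python
# Hedged sketch: reading the restructured harbor.yml shown above with PyYAML.
import yaml

with open("harbor.yml") as f:
    conf = yaml.safe_load(f)

rotate_count = conf["log"]["local"]["rotate_count"]   # previously conf["log"]["rotate_count"]
max_idle = conf["database"].get("max_idle_conns", 2)  # new, optional key
print(rotate_count, max_idle)
```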
@@ -56,9 +56,9 @@ $$;
 
 CREATE TRIGGER harbor_user_update_time_at_modtime BEFORE UPDATE ON harbor_user FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
 
-insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
-('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
-('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
+insert into harbor_user (username, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
+('admin', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
+('anonymous', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
 
 create table project (
 project_id SERIAL PRIMARY KEY NOT NULL,

make/migrations/postgresql/0005_1.8.2_schema.up.sql (new file, 30 lines)

/*
Rename the duplicate names before adding "UNIQUE" constraint
*/
DO $$
BEGIN
    WHILE EXISTS (SELECT count(*) FROM user_group GROUP BY group_name HAVING count(*) > 1) LOOP
        UPDATE user_group AS r
        SET group_name = (
            /*
            truncate the name if it is too long after appending the sequence number
            */
            CASE WHEN (length(group_name)+length(v.seq::text)+1) > 256
            THEN
                substring(group_name from 1 for (255-length(v.seq::text))) || '_' || v.seq
            ELSE
                group_name || '_' || v.seq
            END
        )
        FROM (SELECT id, row_number() OVER (PARTITION BY group_name ORDER BY id) AS seq FROM user_group) AS v
        WHERE r.id = v.id AND v.seq > 1;
    END LOOP;
END $$;

ALTER TABLE user_group ADD CONSTRAINT unique_group_name UNIQUE (group_name);

/*
Fix issue https://github.com/goharbor/harbor/issues/8526, delete the none scan_all schedule.
*/
UPDATE admin_job SET deleted='true' WHERE cron_str='{"type":"none"}';
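The loop in this migration renames the second and later rows sharing a group name by appending `_<seq>`, truncating the name first when the result would overflow the 256-character column. The same rule in Python, purely as an illustration of the CASE expression above:

```python
# Illustration of the renaming rule in the migration above; not Harbor code.
def renamed(group_name: str, seq: int) -> str:
    suffix = "_" + str(seq)
    if len(group_name) + len(suffix) > 256:
        group_name = group_name[: 255 - len(str(seq))]
    return group_name + suffix

assert renamed("developers", 2) == "developers_2"
assert len(renamed("x" * 256, 12)) == 256  # truncated so the suffix still fits
```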
make/migrations/postgresql/0010_1.9.0_schema.up.sql (new file, 183 lines)

/* add table for CVE whitelist */
CREATE TABLE cve_whitelist
(
  id            SERIAL PRIMARY KEY NOT NULL,
  project_id    int,
  creation_time timestamp default CURRENT_TIMESTAMP,
  update_time   timestamp default CURRENT_TIMESTAMP,
  expires_at    bigint,
  items         text NOT NULL,
  UNIQUE (project_id)
);

CREATE TABLE blob
(
  id            SERIAL PRIMARY KEY NOT NULL,
  /*
     digest of config, layer, manifest
  */
  digest        varchar(255) NOT NULL,
  content_type  varchar(255) NOT NULL,
  size          int NOT NULL,
  creation_time timestamp default CURRENT_TIMESTAMP,
  UNIQUE (digest)
);

/* add the table for project and blob */
CREATE TABLE project_blob (
  id            SERIAL PRIMARY KEY NOT NULL,
  project_id    int NOT NULL,
  blob_id       int NOT NULL,
  creation_time timestamp default CURRENT_TIMESTAMP,
  CONSTRAINT unique_project_blob UNIQUE (project_id, blob_id)
);

CREATE TABLE artifact
(
  id            SERIAL PRIMARY KEY NOT NULL,
  project_id    int NOT NULL,
  repo          varchar(255) NOT NULL,
  tag           varchar(255) NOT NULL,
  /*
     digest of manifest
  */
  digest        varchar(255) NOT NULL,
  /*
     kind of artifact, image, chart, etc..
  */
  kind          varchar(255) NOT NULL,
  creation_time timestamp default CURRENT_TIMESTAMP,
  pull_time     timestamp,
  push_time     timestamp,
  CONSTRAINT unique_artifact UNIQUE (project_id, repo, tag)
);

/* add the table for relation of artifact and blob */
CREATE TABLE artifact_blob
(
  id            SERIAL PRIMARY KEY NOT NULL,
  digest_af     varchar(255) NOT NULL,
  digest_blob   varchar(255) NOT NULL,
  creation_time timestamp default CURRENT_TIMESTAMP,
  CONSTRAINT unique_artifact_blob UNIQUE (digest_af, digest_blob)
);

/* add quota table */
CREATE TABLE quota
(
  id            SERIAL PRIMARY KEY NOT NULL,
  reference     VARCHAR(255) NOT NULL,
  reference_id  VARCHAR(255) NOT NULL,
  hard          JSONB NOT NULL,
  creation_time timestamp default CURRENT_TIMESTAMP,
  update_time   timestamp default CURRENT_TIMESTAMP,
  UNIQUE (reference, reference_id)
);

/* add quota usage table */
CREATE TABLE quota_usage
(
  id            SERIAL PRIMARY KEY NOT NULL,
  reference     VARCHAR(255) NOT NULL,
  reference_id  VARCHAR(255) NOT NULL,
  used          JSONB NOT NULL,
  creation_time timestamp default CURRENT_TIMESTAMP,
  update_time   timestamp default CURRENT_TIMESTAMP,
  UNIQUE (reference, reference_id)
);

/* only set quota and usage for 'library', and let the sync quota handling others. */
INSERT INTO quota (reference, reference_id, hard, creation_time, update_time)
SELECT 'project',
       CAST(project_id AS VARCHAR),
       '{"count": -1, "storage": -1}',
       NOW(),
       NOW()
FROM project
WHERE name = 'library' and deleted = 'f';

INSERT INTO quota_usage (id, reference, reference_id, used, creation_time, update_time)
SELECT id,
       reference,
       reference_id,
       '{"count": 0, "storage": 0}',
       creation_time,
       update_time
FROM quota;

create table retention_policy
(
  id serial PRIMARY KEY NOT NULL,
  scope_level varchar(20),
  scope_reference integer,
  trigger_kind varchar(20),
  data text,
  create_time time,
  update_time time
);

create table retention_execution
(
  id serial PRIMARY KEY NOT NULL,
  policy_id integer,
  dry_run boolean,
  trigger varchar(20),
  start_time timestamp
);

create table retention_task
(
  id SERIAL NOT NULL,
  execution_id integer,
  repository varchar(255),
  job_id varchar(64),
  status varchar(32),
  status_code integer,
  start_time timestamp default CURRENT_TIMESTAMP,
  end_time timestamp default CURRENT_TIMESTAMP,
  total integer,
  retained integer,
  PRIMARY KEY (id)
);

create table schedule
(
  id SERIAL NOT NULL,
  job_id varchar(64),
  status varchar(64),
  creation_time timestamp default CURRENT_TIMESTAMP,
  update_time timestamp default CURRENT_TIMESTAMP,
  PRIMARY KEY (id)
);

/*add notification policy table*/
create table notification_policy (
  id SERIAL NOT NULL,
  name varchar(256),
  project_id int NOT NULL,
  enabled boolean NOT NULL DEFAULT true,
  description text,
  targets text,
  event_types text,
  creator varchar(256),
  creation_time timestamp default CURRENT_TIMESTAMP,
  update_time timestamp default CURRENT_TIMESTAMP,
  PRIMARY KEY (id),
  CONSTRAINT unique_project_id UNIQUE (project_id)
);

/*add notification job table*/
CREATE TABLE notification_job (
  id SERIAL NOT NULL,
  policy_id int NOT NULL,
  status varchar(32),
  /* event_type is the type of trigger event, eg. pushImage, pullImage, uploadChart... */
  event_type varchar(256),
  /* notify_type is the type to notify event to user, eg. HTTP, Email... */
  notify_type varchar(256),
  job_detail text,
  job_uuid varchar(64),
  creation_time timestamp default CURRENT_TIMESTAMP,
  update_time timestamp default CURRENT_TIMESTAMP,
  PRIMARY KEY (id)
);
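Two columns in this migration deserve a note: `cve_whitelist.expires_at` holds seconds since the epoch, and `quota.hard`/`quota_usage.used` hold JSON objects like the seeded `{"count": -1, "storage": -1}`, where -1 reads as unlimited. A small illustration; treating `items` as a JSON-encoded list is an assumption based on its `text` type:

```python
# Illustration only: values shaped like the cve_whitelist and quota rows above.
import json
import time

expires_at = int(time.time()) + 30 * 24 * 3600      # whitelist valid for 30 days
items = json.dumps([{"cve_id": "CVE-2019-10164"}])  # assumed JSON encoding of items
hard = json.dumps({"count": -1, "storage": -1})     # -1 = unlimited
print(expires_at, items, hard)
```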
@@ -4,7 +4,7 @@ set +e
 
 usage(){
   echo "Usage: builder <golang image:version> <code path> <code release tag> <main.go path> <binary name>"
-  echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.8.1 cmd/chartmuseum chartm"
+  echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.9.0 cmd/chartmuseum chartm"
   exit 1
 }
 
@@ -13,7 +13,7 @@ if [ $# != 5 ]; then
 fi
 
 GOLANG_IMAGE="$1"
-CODE_PATH="$2"
+GIT_PATH="$2"
 CODE_VERSION="$3"
 MAIN_GO_PATH="$4"
 BIN_NAME="$5"
@@ -27,7 +27,7 @@ mkdir -p binary
 rm -rf binary/$BIN_NAME || true
 cp compile.sh binary/
 
-docker run -it -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $CODE_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME
+docker run -it --rm -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $GIT_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME
 
 #Clear
 docker rm -f golang_code_builder

@@ -11,24 +11,21 @@ if [ $# != 4 ]; then
     usage
 fi
 
-CODE_PATH="$1"
+GIT_PATH="$1"
 VERSION="$2"
 MAIN_GO_PATH="$3"
 BIN_NAME="$4"
 
-#Get the source code of chartmusem
-go get $CODE_PATH
+#Get the source code
+git clone $GIT_PATH src_code
+ls
+SRC_PATH=$(pwd)/src_code
 set -e
 
 #Checkout the released tag branch
-cd /go/src/$CODE_PATH
+cd $SRC_PATH
 git checkout tags/$VERSION -b $VERSION
 
-#Install the go dep tool to restore the package dependencies
-curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
-dep ensure
-
 #Compile
-cd /go/src/$CODE_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME
+cd $SRC_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME
 mv $BIN_NAME /go/bin/
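The rewritten compile.sh drops `go get` and the dep toolchain in favour of a plain `git clone` of the release tag followed by `go build`. The same flow as a Python sketch, assuming git and go are on PATH; the chartmuseum URL and names are illustrative only:

```python
# Hedged sketch of the clone/checkout/build flow compile.sh now implements.
import os
import subprocess

def build(git_url: str, version: str, main_go_path: str, bin_name: str) -> None:
    subprocess.run(["git", "clone", git_url, "src_code"], check=True)
    src = os.path.join(os.getcwd(), "src_code")
    subprocess.run(["git", "checkout", f"tags/{version}", "-b", version],
                   cwd=src, check=True)
    subprocess.run(["go", "build", "-a", "-o", bin_name],
                   cwd=os.path.join(src, main_go_path), check=True)

build("https://github.com/helm/chartmuseum", "v0.9.0", "cmd/chartmuseum", "chartm")
```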
@@ -6,11 +6,11 @@ RUN tdnf install sudo -y >> /dev/null\
     && mkdir /harbor/
 
 HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/ping || exit 1
-COPY ./make/photon/core/harbor_core ./make/photon/core/start.sh ./UIVERSION /harbor/
+COPY ./make/photon/core/harbor_core ./UIVERSION /harbor/
 COPY ./src/core/views /harbor/views
 COPY ./make/migrations /harbor/migrations
 
-RUN chmod u+x /harbor/start.sh /harbor/harbor_core
+RUN chmod u+x /harbor/harbor_core
 WORKDIR /harbor/
-ENTRYPOINT ["/harbor/start.sh"]
+USER harbor
+ENTRYPOINT ["/harbor/harbor_core"]

@@ -1,3 +0,0 @@
-#!/bin/sh
-
-sudo -E -u \#10000 "/harbor/harbor_core"

@@ -18,15 +18,16 @@ RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools
 
 VOLUME /var/lib/postgresql/data
 
-ADD ./make/photon/db/docker-entrypoint.sh /entrypoint.sh
-ADD ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
-RUN chmod u+x /entrypoint.sh /docker-healthcheck.sh
-ENTRYPOINT ["/entrypoint.sh"]
-HEALTHCHECK CMD ["/docker-healthcheck.sh"]
+COPY ./make/photon/db/docker-entrypoint.sh /docker-entrypoint.sh
+COPY ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
 
 COPY ./make/photon/db/initial-notaryserver.sql /docker-entrypoint-initdb.d/
 COPY ./make/photon/db/initial-notarysigner.sql /docker-entrypoint-initdb.d/
 COPY ./make/photon/db/initial-registry.sql /docker-entrypoint-initdb.d/
+RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \
+    && chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+HEALTHCHECK CMD ["/docker-healthcheck.sh"]
 
 EXPOSE 5432
-CMD ["postgres"]
+USER postgres

@@ -23,95 +23,88 @@ file_env() {
     unset "$fileVar"
 }
 
-if [ "${1:0:1}" = '-' ]; then
-    set -- postgres "$@"
-fi
-
-if [ "$1" = 'postgres' ]; then
-    chown -R postgres:postgres $PGDATA
-    # look specifically for PG_VERSION, as it is expected in the DB dir
-    if [ ! -s "$PGDATA/PG_VERSION" ]; then
-        file_env 'POSTGRES_INITDB_ARGS'
-        if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
-            export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
-        fi
-        su - $1 -c "initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS"
+# look specifically for PG_VERSION, as it is expected in the DB dir
+if [ ! -s "$PGDATA/PG_VERSION" ]; then
+    file_env 'POSTGRES_INITDB_ARGS'
+    if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
+        export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
+    fi
+    initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS
     # check password first so we can output the warning before postgres
     # messes it up
     file_env 'POSTGRES_PASSWORD'
     if [ "$POSTGRES_PASSWORD" ]; then
        pass="PASSWORD '$POSTGRES_PASSWORD'"
        authMethod=md5
     else
        # The - option suppresses leading tabs but *not* spaces. :)
        cat >&2 <<-EOF
            ****************************************************
            WARNING: No password has been set for the database.
            This will allow anyone with access to the
            Postgres port to access your database. In
            Docker's default configuration, this is
            effectively any other container on the same
            system.
            Use "-e POSTGRES_PASSWORD=password" to set
            it in "docker run".
            ****************************************************
        EOF
 
        pass=
        authMethod=trust
     fi
 
     {
        echo
        echo "host all all all $authMethod"
     } >> "$PGDATA/pg_hba.conf"
-    su postgres
     echo `whoami`
     # internal start of server in order to allow set-up using psql-client
     # does not listen on external TCP/IP and waits until start finishes
-    su - $1 -c "pg_ctl -D \"$PGDATA\" -o \"-c listen_addresses='localhost'\" -w start"
+    pg_ctl -D "$PGDATA" -o "-c listen_addresses=''" -w start
 
     file_env 'POSTGRES_USER' 'postgres'
     file_env 'POSTGRES_DB' "$POSTGRES_USER"
 
     psql=( psql -v ON_ERROR_STOP=1 )
 
     if [ "$POSTGRES_DB" != 'postgres' ]; then
        "${psql[@]}" --username postgres <<-EOSQL
            CREATE DATABASE "$POSTGRES_DB" ;
        EOSQL
        echo
     fi
 
     if [ "$POSTGRES_USER" = 'postgres' ]; then
        op='ALTER'
     else
        op='CREATE'
     fi
     "${psql[@]}" --username postgres <<-EOSQL
        $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
     EOSQL
     echo
 
     psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
 
     echo
     for f in /docker-entrypoint-initdb.d/*; do
        case "$f" in
            *.sh)     echo "$0: running $f"; . "$f" ;;
            *.sql)    echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
            *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
            *)        echo "$0: ignoring $f" ;;
        esac
        echo
     done
 
     PGUSER="${PGUSER:-postgres}" \
-    su - $1 -c "pg_ctl -D \"$PGDATA\" -m fast -w stop"
+    pg_ctl -D "$PGDATA" -m fast -w stop
 
     echo
     echo 'PostgreSQL init process complete; ready for start up.'
     echo
 fi
-fi
-exec su - $1 -c "$@ -D $PGDATA"
+
+postgres -D $PGDATA
|
|||||||
FROM photon:2.0
|
FROM photon:2.0
|
||||||
|
|
||||||
RUN mkdir /harbor/ \
|
RUN tdnf install sudo -y >> /dev/null\
|
||||||
&& tdnf install sudo -y >> /dev/null\
|
|
||||||
&& tdnf clean all \
|
&& tdnf clean all \
|
||||||
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor
|
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor
|
||||||
|
|
||||||
COPY ./make/photon/jobservice/start.sh ./make/photon/jobservice/harbor_jobservice /harbor/
|
COPY ./make/photon/jobservice/harbor_jobservice /harbor/
|
||||||
|
|
||||||
|
RUN chmod u+x /harbor/harbor_jobservice
|
||||||
|
|
||||||
RUN chmod u+x /harbor/harbor_jobservice /harbor/start.sh
|
|
||||||
WORKDIR /harbor/
|
WORKDIR /harbor/
|
||||||
ENTRYPOINT ["/harbor/start.sh"]
|
|
||||||
|
USER harbor
|
||||||
|
|
||||||
|
VOLUME ["/var/log/jobs/"]
|
||||||
|
|
||||||
|
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/v1/stats || exit 1
|
||||||
|
|
||||||
|
ENTRYPOINT ["/harbor/harbor_jobservice", "-c", "/etc/jobservice/config.yml"]
|
||||||
|
@ -1,6 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
if [ -d /var/log/jobs ]; then
|
|
||||||
chown -R 10000:10000 /var/log/jobs/
|
|
||||||
fi
|
|
||||||
sudo -E -u \#10000 "/harbor/harbor_jobservice" "-c" "/etc/jobservice/config.yml"
|
|
||||||
|
|
@ -1,8 +1,5 @@
|
|||||||
# Rsyslog configuration file for docker.
|
# Rsyslog configuration file for docker.
|
||||||
|
template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log")
|
||||||
template(name="DynaFile" type="string"
|
if $programname != "rsyslogd" then {
|
||||||
string="/var/log/docker/%syslogtag:R,ERE,0,DFLT:[^[]*--end:secpath-replace%.log"
|
action(type="omfile" dynaFile="DynaFile")
|
||||||
)
|
}
|
||||||
#if $programname == "docker" then ?DynaFile
|
|
||||||
if $programname != "rsyslogd" then -?DynaFile
|
|
||||||
|
|
||||||
|
@ -1,14 +1,19 @@
|
|||||||
FROM photon:2.0
|
FROM photon:2.0
|
||||||
|
|
||||||
RUN tdnf install -y nginx >> /dev/null\
|
RUN tdnf install sudo nginx -y >> /dev/null\
|
||||||
|
&& tdnf clean all \
|
||||||
|
&& groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
|
||||||
&& ln -sf /dev/stdout /var/log/nginx/access.log \
|
&& ln -sf /dev/stdout /var/log/nginx/access.log \
|
||||||
&& ln -sf /dev/stderr /var/log/nginx/error.log \
|
&& ln -sf /dev/stderr /var/log/nginx/error.log
|
||||||
&& tdnf clean all
|
|
||||||
|
|
||||||
EXPOSE 80
|
|
||||||
VOLUME /var/cache/nginx /var/log/nginx /run
|
VOLUME /var/cache/nginx /var/log/nginx /run
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
STOPSIGNAL SIGQUIT
|
STOPSIGNAL SIGQUIT
|
||||||
|
|
||||||
HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
|
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
|
||||||
|
|
||||||
|
USER nginx
|
||||||
|
|
||||||
CMD ["nginx", "-g", "daemon off;"]
|
CMD ["nginx", "-g", "daemon off;"]
|
||||||
|
@ -1,2 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"
|
|
@ -4,12 +4,12 @@ RUN tdnf install -y shadow sudo \
|
|||||||
&& tdnf clean all \
|
&& tdnf clean all \
|
||||||
&& groupadd -r -g 10000 notary \
|
&& groupadd -r -g 10000 notary \
|
||||||
&& useradd --no-log-init -r -g 10000 -u 10000 notary
|
&& useradd --no-log-init -r -g 10000 -u 10000 notary
|
||||||
|
|
||||||
COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
|
COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
|
||||||
COPY ./make/photon/notary/binary/notary-server /bin/notary-server
|
COPY ./make/photon/notary/binary/notary-server /bin/notary-server
|
||||||
COPY ./make/photon/notary/binary/migrate /bin/migrate
|
COPY ./make/photon/notary/binary/migrate /bin/migrate
|
||||||
COPY ./make/photon/notary/binary/migrations/ /migrations/
|
COPY ./make/photon/notary/binary/migrations/ /migrations/
|
||||||
COPY ./make/photon/notary/server-start.sh /bin/server-start.sh
|
|
||||||
RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/server-start.sh
|
RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch
|
||||||
ENV SERVICE_NAME=notary_server
|
ENV SERVICE_NAME=notary_server
|
||||||
ENTRYPOINT [ "/bin/server-start.sh" ]
|
USER notary
|
||||||
|
CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt
|
@ -1,2 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"
|
|
@ -8,8 +8,8 @@ COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
|
|||||||
COPY ./make/photon/notary/binary/notary-signer /bin/notary-signer
|
COPY ./make/photon/notary/binary/notary-signer /bin/notary-signer
|
||||||
COPY ./make/photon/notary/binary/migrate /bin/migrate
|
COPY ./make/photon/notary/binary/migrate /bin/migrate
|
||||||
COPY ./make/photon/notary/binary/migrations/ /migrations/
|
COPY ./make/photon/notary/binary/migrations/ /migrations/
|
||||||
COPY ./make/photon/notary/signer-start.sh /bin/signer-start.sh
|
|
||||||
|
|
||||||
RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/signer-start.sh
|
RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch
|
||||||
ENV SERVICE_NAME=notary_signer
|
ENV SERVICE_NAME=notary_signer
|
||||||
ENTRYPOINT [ "/bin/signer-start.sh" ]
|
USER notary
|
||||||
|
CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt
|
@ -1,39 +1,44 @@
|
|||||||
FROM node:10.15.0 as nodeportal
|
FROM node:10.15.0 as nodeportal
|
||||||
|
|
||||||
RUN mkdir -p /portal_src
|
|
||||||
RUN mkdir -p /build_dir
|
|
||||||
|
|
||||||
COPY make/photon/portal/entrypoint.sh /
|
|
||||||
COPY src/portal /portal_src
|
COPY src/portal /portal_src
|
||||||
COPY ./docs/swagger.yaml /portal_src
|
COPY ./docs/swagger.yaml /portal_src
|
||||||
|
COPY ./LICENSE /portal_src
|
||||||
|
|
||||||
WORKDIR /portal_src
|
WORKDIR /build_dir
|
||||||
|
|
||||||
RUN npm install && \
|
RUN cp -r /portal_src/* /build_dir \
|
||||||
chmod u+x /entrypoint.sh
|
&& ls -la \
|
||||||
RUN /entrypoint.sh
|
&& apt-get update \
|
||||||
VOLUME ["/portal_src"]
|
&& apt-get install -y --no-install-recommends python-yaml=3.12-1 \
|
||||||
|
&& python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' < swagger.yaml > swagger.json \
|
||||||
|
&& npm install \
|
||||||
|
&& npm run build_lib \
|
||||||
|
&& npm run link_lib \
|
||||||
|
&& npm run release
|
||||||
|
|
||||||
|
|
||||||
FROM photon:2.0
|
FROM photon:2.0
|
||||||
|
|
||||||
RUN tdnf install -y nginx >> /dev/null \
|
|
||||||
&& ln -sf /dev/stdout /var/log/nginx/access.log \
|
|
||||||
&& ln -sf /dev/stderr /var/log/nginx/error.log \
|
|
||||||
&& tdnf clean all
|
|
||||||
|
|
||||||
EXPOSE 80
|
|
||||||
VOLUME /var/cache/nginx /var/log/nginx /run
|
|
||||||
|
|
||||||
|
|
||||||
COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html
|
COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html
|
||||||
COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html
|
COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html
|
||||||
COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html
|
COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html
|
||||||
|
COPY --from=nodeportal /build_dir/LICENSE /usr/share/nginx/html
|
||||||
|
|
||||||
COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf
|
COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf
|
||||||
|
|
||||||
|
RUN tdnf install -y nginx sudo >> /dev/null \
|
||||||
|
&& ln -sf /dev/stdout /var/log/nginx/access.log \
|
||||||
|
&& ln -sf /dev/stderr /var/log/nginx/error.log \
|
||||||
|
&& groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
|
||||||
|
&& chown -R nginx:nginx /etc/nginx \
|
||||||
|
&& tdnf clean all
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
VOLUME /var/cache/nginx /var/log/nginx /run
|
||||||
|
|
||||||
STOPSIGNAL SIGQUIT
|
STOPSIGNAL SIGQUIT
|
||||||
|
|
||||||
HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
|
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
|
||||||
|
USER nginx
|
||||||
CMD ["nginx", "-g", "daemon off;"]
|
CMD ["nginx", "-g", "daemon off;"]
|
||||||
|
|
||||||
|
@ -1,21 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
cd /build_dir
|
|
||||||
cp -r /portal_src/* .
|
|
||||||
ls -la
|
|
||||||
|
|
||||||
# Update
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y ruby
|
|
||||||
ruby -ryaml -rjson -e 'puts JSON.pretty_generate(YAML.load(ARGF))' swagger.yaml>swagger.json
|
|
||||||
|
|
||||||
cat ./package.json
|
|
||||||
npm install
|
|
||||||
|
|
||||||
## Build harbor-portal and link it
|
|
||||||
npm run build_lib
|
|
||||||
npm run link_lib
|
|
||||||
|
|
||||||
## Build production
|
|
||||||
npm run release
|
|
@ -1,13 +1,21 @@
|
|||||||
|
|
||||||
worker_processes 1;
|
worker_processes auto;
|
||||||
|
pid /tmp/nginx.pid;
|
||||||
|
|
||||||
events {
|
events {
|
||||||
worker_connections 1024;
|
worker_connections 1024;
|
||||||
}
|
}
|
||||||
|
|
||||||
http {
|
http {
|
||||||
|
|
||||||
|
client_body_temp_path /tmp/client_body_temp;
|
||||||
|
proxy_temp_path /tmp/proxy_temp;
|
||||||
|
fastcgi_temp_path /tmp/fastcgi_temp;
|
||||||
|
uwsgi_temp_path /tmp/uwsgi_temp;
|
||||||
|
scgi_temp_path /tmp/scgi_temp;
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 80;
|
listen 8080;
|
||||||
server_name localhost;
|
server_name localhost;
|
||||||
|
|
||||||
root /usr/share/nginx/html;
|
root /usr/share/nginx/html;
|
||||||
|
@ -5,11 +5,19 @@ from pathlib import Path
|
|||||||
DEFAULT_UID = 10000
|
DEFAULT_UID = 10000
|
||||||
DEFAULT_GID = 10000
|
DEFAULT_GID = 10000
|
||||||
|
|
||||||
|
PG_UID = 999
|
||||||
|
PG_GID = 999
|
||||||
|
|
||||||
|
REDIS_UID = 999
|
||||||
|
REDIS_GID = 999
|
||||||
|
|
||||||
## Global variable
|
## Global variable
|
||||||
|
host_root_dir = '/hostfs'
|
||||||
|
|
||||||
base_dir = '/harbor_make'
|
base_dir = '/harbor_make'
|
||||||
templates_dir = "/usr/src/app/templates"
|
templates_dir = "/usr/src/app/templates"
|
||||||
config_dir = '/config'
|
config_dir = '/config'
|
||||||
|
data_dir = '/data'
|
||||||
secret_dir = '/secret'
|
secret_dir = '/secret'
|
||||||
secret_key_dir='/secret/keys'
|
secret_key_dir='/secret/keys'
|
||||||
|
|
||||||
|
@ -16,6 +16,7 @@ from utils.clair import prepare_clair
|
|||||||
from utils.chart import prepare_chartmuseum
|
from utils.chart import prepare_chartmuseum
|
||||||
from utils.docker_compose import prepare_docker_compose
|
from utils.docker_compose import prepare_docker_compose
|
||||||
from utils.nginx import prepare_nginx, nginx_confd_dir
|
from utils.nginx import prepare_nginx, nginx_confd_dir
|
||||||
|
from utils.redis import prepare_redis
|
||||||
from g import (config_dir, input_config_path, private_key_pem_path, root_crt_path, secret_key_dir,
|
from g import (config_dir, input_config_path, private_key_pem_path, root_crt_path, secret_key_dir,
|
||||||
old_private_key_pem_path, old_crt_path)
|
old_private_key_pem_path, old_crt_path)
|
||||||
|
|
||||||
@ -38,6 +39,7 @@ def main(conf, with_notary, with_clair, with_chartmuseum):
|
|||||||
prepare_registry_ctl(config_dict)
|
prepare_registry_ctl(config_dict)
|
||||||
prepare_db(config_dict)
|
prepare_db(config_dict)
|
||||||
prepare_job_service(config_dict)
|
prepare_job_service(config_dict)
|
||||||
|
prepare_redis(config_dict)
|
||||||
|
|
||||||
get_secret_key(secret_key_dir)
|
get_secret_key(secret_key_dir)
|
||||||
|
|
||||||
|
@ -1,3 +1,3 @@
|
|||||||
http_proxy={{clair_http_proxy}}
|
HTTP_PROXY={{clair_http_proxy}}
|
||||||
https_proxy={{clair_https_proxy}}
|
HTTPS_PROXY={{clair_https_proxy}}
|
||||||
no_proxy={{clair_no_proxy}}
|
NO_PROXY={{clair_no_proxy}}
|
||||||
|
@ -17,9 +17,3 @@ clair:
|
|||||||
timeout: 300s
|
timeout: 300s
|
||||||
updater:
|
updater:
|
||||||
interval: {{clair_updaters_interval}}h
|
interval: {{clair_updaters_interval}}h
|
||||||
|
|
||||||
notifier:
|
|
||||||
attempts: 3
|
|
||||||
renotifyinterval: 2h
|
|
||||||
http:
|
|
||||||
endpoint: http://core:8080/service/notifications/clair
|
|
||||||
|
@ -15,6 +15,8 @@ POSTGRESQL_USERNAME={{harbor_db_username}}
|
|||||||
POSTGRESQL_PASSWORD={{harbor_db_password}}
|
POSTGRESQL_PASSWORD={{harbor_db_password}}
|
||||||
POSTGRESQL_DATABASE={{harbor_db_name}}
|
POSTGRESQL_DATABASE={{harbor_db_name}}
|
||||||
POSTGRESQL_SSLMODE={{harbor_db_sslmode}}
|
POSTGRESQL_SSLMODE={{harbor_db_sslmode}}
|
||||||
|
POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}}
|
||||||
|
POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}}
|
||||||
REGISTRY_URL={{registry_url}}
|
REGISTRY_URL={{registry_url}}
|
||||||
TOKEN_SERVICE_URL={{token_service_url}}
|
TOKEN_SERVICE_URL={{token_service_url}}
|
||||||
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
|
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
|
||||||
@ -31,6 +33,7 @@ CLAIR_DB_USERNAME={{clair_db_username}}
|
|||||||
CLAIR_DB={{clair_db_name}}
|
CLAIR_DB={{clair_db_name}}
|
||||||
CLAIR_DB_SSLMODE={{clair_db_sslmode}}
|
CLAIR_DB_SSLMODE={{clair_db_sslmode}}
|
||||||
CORE_URL={{core_url}}
|
CORE_URL={{core_url}}
|
||||||
|
CORE_LOCAL_URL={{core_local_url}}
|
||||||
JOBSERVICE_URL={{jobservice_url}}
|
JOBSERVICE_URL={{jobservice_url}}
|
||||||
CLAIR_URL={{clair_url}}
|
CLAIR_URL={{clair_url}}
|
||||||
NOTARY_URL={{notary_url}}
|
NOTARY_URL={{notary_url}}
|
||||||
@ -40,3 +43,7 @@ RELOAD_KEY={{reload_key}}
|
|||||||
CHART_REPOSITORY_URL={{chart_repository_url}}
|
CHART_REPOSITORY_URL={{chart_repository_url}}
|
||||||
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
|
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
|
||||||
WITH_CHARTMUSEUM={{with_chartmuseum}}
|
WITH_CHARTMUSEUM={{with_chartmuseum}}
|
||||||
|
|
||||||
|
HTTP_PROXY={{core_http_proxy}}
|
||||||
|
HTTPS_PROXY={{core_https_proxy}}
|
||||||
|
NO_PROXY={{core_no_proxy}}
|
||||||
|
@ -14,7 +14,8 @@ services:
|
|||||||
- SETUID
|
- SETUID
|
||||||
volumes:
|
volumes:
|
||||||
- {{log_location}}/:/var/log/docker/:z
|
- {{log_location}}/:/var/log/docker/:z
|
||||||
- ./common/config/log/:/etc/logrotate.d/:z
|
- ./common/config/log/logrotate.conf:/etc/logrotate.d/logrotate.conf:z
|
||||||
|
- ./common/config/log/rsyslog_docker.conf:/etc/rsyslog.d/rsyslog_docker.conf:z
|
||||||
ports:
|
ports:
|
||||||
- 127.0.0.1:1514:10514
|
- 127.0.0.1:1514:10514
|
||||||
networks:
|
networks:
|
||||||
@ -275,12 +276,7 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
- ./common/config/nginx:/etc/nginx:z
|
- ./common/config/nginx:/etc/nginx:z
|
||||||
{% if protocol == 'https' %}
|
{% if protocol == 'https' %}
|
||||||
- type: bind
|
- {{data_volume}}/secret/cert:/etc/cert:z
|
||||||
source: {{cert_key_path}}
|
|
||||||
target: /etc/cert/server.key
|
|
||||||
- type: bind
|
|
||||||
source: {{cert_path}}
|
|
||||||
target: /etc/cert/server.crt
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
networks:
|
networks:
|
||||||
- harbor
|
- harbor
|
||||||
@ -289,9 +285,9 @@ services:
|
|||||||
{% endif %}
|
{% endif %}
|
||||||
dns_search: .
|
dns_search: .
|
||||||
ports:
|
ports:
|
||||||
- {{http_port}}:80
|
- {{http_port}}:8080
|
||||||
{% if protocol == 'https' %}
|
{% if protocol == 'https' %}
|
||||||
- {{https_port}}:443
|
- {{https_port}}:8443
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if with_notary %}
|
{% if with_notary %}
|
||||||
- 4443:4443
|
- 4443:4443
|
||||||
@ -419,7 +415,7 @@ services:
|
|||||||
{% if gcs_keyfile %}
|
{% if gcs_keyfile %}
|
||||||
- type: bind
|
- type: bind
|
||||||
source: {{gcs_keyfile}}
|
source: {{gcs_keyfile}}
|
||||||
target: /etc/registry/gcs.key
|
target: /etc/chartserver/gcs.key
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{%if registry_custom_ca_bundle_path %}
|
{%if registry_custom_ca_bundle_path %}
|
||||||
- type: bind
|
- type: bind
|
||||||
|
@ -1,3 +1,8 @@
|
|||||||
CORE_SECRET={{core_secret}}
|
CORE_SECRET={{core_secret}}
|
||||||
JOBSERVICE_SECRET={{jobservice_secret}}
|
JOBSERVICE_SECRET={{jobservice_secret}}
|
||||||
CORE_URL={{core_url}}
|
CORE_URL={{core_url}}
|
||||||
|
JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}}
|
||||||
|
|
||||||
|
HTTP_PROXY={{jobservice_http_proxy}}
|
||||||
|
HTTPS_PROXY={{jobservice_https_proxy}}
|
||||||
|
NO_PROXY={{jobservice_no_proxy}}
|
||||||
|
11
make/photon/prepare/templates/log/rsyslog_docker.conf.jinja
Normal file
11
make/photon/prepare/templates/log/rsyslog_docker.conf.jinja
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
# Rsyslog configuration file for docker.
|
||||||
|
|
||||||
|
template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log")
|
||||||
|
|
||||||
|
if $programname != "rsyslogd" then {
|
||||||
|
{%if log_external %}
|
||||||
|
action(type="omfwd" Target="{{log_ep_host}}" Port="{{log_ep_port}}" Protocol="{{log_ep_protocol}}" Template="RSYSLOG_SyslogProtocol23Format")
|
||||||
|
{% else %}
|
||||||
|
action(type="omfile" dynaFile="DynaFile")
|
||||||
|
{% endif %}
|
||||||
|
}
|
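The template branches on `log_external`: when it is set, container logs are forwarded to the configured syslog endpoint with omfwd, otherwise they land in per-program files as before. A quick way to eyeball both renderings, assuming jinja2 is installed and the template file is on disk:

```python
# Hedged sketch: rendering the rsyslog template above for both branches.
from jinja2 import Template

with open("rsyslog_docker.conf.jinja") as f:
    tpl = Template(f.read())

print(tpl.render(log_external=True, log_ep_host="syslog.example.com",
                 log_ep_port="5140", log_ep_protocol="tcp"))
print(tpl.render(log_external=False))
```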
@ -1,4 +1,5 @@
|
|||||||
worker_processes auto;
|
worker_processes auto;
|
||||||
|
pid /tmp/nginx.pid;
|
||||||
|
|
||||||
events {
|
events {
|
||||||
worker_connections 1024;
|
worker_connections 1024;
|
||||||
@ -7,6 +8,11 @@ events {
|
|||||||
}
|
}
|
||||||
|
|
||||||
http {
|
http {
|
||||||
|
client_body_temp_path /tmp/client_body_temp;
|
||||||
|
proxy_temp_path /tmp/proxy_temp;
|
||||||
|
fastcgi_temp_path /tmp/fastcgi_temp;
|
||||||
|
uwsgi_temp_path /tmp/uwsgi_temp;
|
||||||
|
scgi_temp_path /tmp/scgi_temp;
|
||||||
tcp_nodelay on;
|
tcp_nodelay on;
|
||||||
|
|
||||||
# this is necessary for us to be able to disable request buffering in all cases
|
# this is necessary for us to be able to disable request buffering in all cases
|
||||||
@ -17,7 +23,7 @@ http {
|
|||||||
}
|
}
|
||||||
|
|
||||||
upstream portal {
|
upstream portal {
|
||||||
server portal:80;
|
server portal:8080;
|
||||||
}
|
}
|
||||||
|
|
||||||
log_format timed_combined '$remote_addr - '
|
log_format timed_combined '$remote_addr - '
|
||||||
@ -28,7 +34,7 @@ http {
|
|||||||
access_log /dev/stdout timed_combined;
|
access_log /dev/stdout timed_combined;
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 80;
|
listen 8080;
|
||||||
server_tokens off;
|
server_tokens off;
|
||||||
# disable any limits to avoid HTTP 413 for large image uploads
|
# disable any limits to avoid HTTP 413 for large image uploads
|
||||||
client_max_body_size 0;
|
client_max_body_size 0;
|
||||||
@ -117,7 +123,7 @@ http {
|
|||||||
proxy_request_buffering off;
|
proxy_request_buffering off;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /service/notifications {
|
location /service/notifications {
|
||||||
return 404;
|
return 404;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
worker_processes auto;
|
worker_processes auto;
|
||||||
|
pid /tmp/nginx.pid;
|
||||||
|
|
||||||
events {
|
events {
|
||||||
worker_connections 1024;
|
worker_connections 1024;
|
||||||
@ -7,6 +8,11 @@ events {
|
|||||||
}
|
}
|
||||||
|
|
||||||
http {
|
http {
|
||||||
|
client_body_temp_path /tmp/client_body_temp;
|
||||||
|
proxy_temp_path /tmp/proxy_temp;
|
||||||
|
fastcgi_temp_path /tmp/fastcgi_temp;
|
||||||
|
uwsgi_temp_path /tmp/uwsgi_temp;
|
||||||
|
scgi_temp_path /tmp/scgi_temp;
|
||||||
tcp_nodelay on;
|
tcp_nodelay on;
|
||||||
include /etc/nginx/conf.d/*.upstream.conf;
|
include /etc/nginx/conf.d/*.upstream.conf;
|
||||||
|
|
||||||
@ -18,7 +24,7 @@ http {
|
|||||||
}
|
}
|
||||||
|
|
||||||
upstream portal {
|
upstream portal {
|
||||||
server portal:80;
|
server portal:8080;
|
||||||
}
|
}
|
||||||
|
|
||||||
log_format timed_combined '$remote_addr - '
|
log_format timed_combined '$remote_addr - '
|
||||||
@ -31,7 +37,7 @@ http {
|
|||||||
include /etc/nginx/conf.d/*.server.conf;
|
include /etc/nginx/conf.d/*.server.conf;
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 8443 ssl;
|
||||||
# server_name harbordomain.com;
|
# server_name harbordomain.com;
|
||||||
server_tokens off;
|
server_tokens off;
|
||||||
# SSL
|
# SSL
|
||||||
@ -136,13 +142,13 @@ http {
|
|||||||
proxy_buffering off;
|
proxy_buffering off;
|
||||||
proxy_request_buffering off;
|
proxy_request_buffering off;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /service/notifications {
|
location /service/notifications {
|
||||||
return 404;
|
return 404;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
server {
|
server {
|
||||||
listen 80;
|
listen 8080;
|
||||||
#server_name harbordomain.com;
|
#server_name harbordomain.com;
|
||||||
return 308 https://$host$request_uri;
|
return 308 https://$host$request_uri;
|
||||||
}
|
}
|
||||||
|
@@ -2,12 +2,12 @@ import os, shutil
 
 from g import templates_dir, config_dir, DEFAULT_UID, DEFAULT_GID
 from .jinja import render_jinja
-from .misc import prepare_config_dir
+from .misc import prepare_dir
 
 clair_template_dir = os.path.join(templates_dir, "clair")
 
 def prepare_clair(config_dict):
-    clair_config_dir = prepare_config_dir(config_dir, "clair")
+    clair_config_dir = prepare_dir(config_dir, "clair")
 
     if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
         print("Copying offline data file for clair DB")
@@ -13,6 +13,14 @@ def validate(conf, **kwargs):
     if not conf.get("cert_key_path"):
         raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
 
+    # log endpoint validate
+    if ('log_ep_host' in conf) and not conf['log_ep_host']:
+        raise Exception('Error: must set log endpoint host to enable external host')
+    if ('log_ep_port' in conf) and not conf['log_ep_port']:
+        raise Exception('Error: must set log endpoint port to enable external host')
+    if ('log_ep_protocol' in conf) and (conf['log_ep_protocol'] not in ['udp', 'tcp']):
+        raise Exception("Protocol in external log endpoint must be one of 'udp' or 'tcp'")
+
     # Storage validate
     valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
     storage_provider_name = conf.get("storage_provider_name")
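Taken together, the new checks make a half-configured external log endpoint fail fast instead of producing a broken syslog setup later. A minimal sketch of the behaviour, assuming the earlier https checks are skipped for a conf without a 'protocol' key (the call below is illustrative, not part of this commit):

    conf = {'log_ep_host': 'syslog.example.com', 'log_ep_port': 514, 'log_ep_protocol': 'udp'}
    validate(conf)  # passes: host, port and a supported protocol are all present

    conf['log_ep_protocol'] = 'http'  # anything other than 'udp' or 'tcp'
    try:
        validate(conf)
    except Exception as err:
        print(err)  # Protocol in external log endpoint must be one of 'udp' or 'tcp'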
@@ -59,6 +67,7 @@ def parse_yaml_config(config_file_path):
     'registry_url': "http://registry:5000",
     'registry_controller_url': "http://registryctl:8080",
     'core_url': "http://core:8080",
+    'core_local_url': "http://127.0.0.1:8080",
     'token_service_url': "http://core:8080/service/token",
     'jobservice_url': 'http://jobservice:8080',
     'clair_url': 'http://clair:6060',
@@ -103,6 +112,11 @@ def parse_yaml_config(config_file_path):
     config_dict['harbor_db_username'] = 'postgres'
     config_dict['harbor_db_password'] = db_configs.get("password") or ''
     config_dict['harbor_db_sslmode'] = 'disable'
+
+    default_max_idle_conns = 2  # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns
+    default_max_open_conns = 0  # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns
+    config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_max_idle_conns
+    config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_max_open_conns
     # clari db
     config_dict['clair_db_host'] = 'postgresql'
     config_dict['clair_db_port'] = 5432
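One subtlety of the `or`-style defaults above: a value explicitly set to 0 in harbor.yml is falsy, so it falls back to the default exactly as an unset value does. For max_open_conns that is harmless because the default is itself 0 (unlimited, per the linked database/sql docs); for max_idle_conns an explicit 0 silently becomes 2. A quick sketch with a hypothetical db_configs dict:

    db_configs = {'max_idle_conns': 0}        # user explicitly asked for no idle connections
    db_configs.get('max_idle_conns') or 2     # -> 2, the explicit 0 is lost
    db_configs.get('max_open_conns') or 0     # -> 0, unset and default happen to coincide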
@@ -162,13 +176,18 @@ def parse_yaml_config(config_file_path):
     if storage_config.get('redirect'):
         config_dict['storage_redirect_disabled'] = storage_config['redirect']['disabled']
 
+    # Global proxy configs
+    proxy_config = configs.get('proxy') or {}
+    proxy_components = proxy_config.get('components') or []
+    for proxy_component in proxy_components:
+        config_dict[proxy_component + '_http_proxy'] = proxy_config.get('http_proxy') or ''
+        config_dict[proxy_component + '_https_proxy'] = proxy_config.get('https_proxy') or ''
+        config_dict[proxy_component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry'
+
     # Clair configs, optional
     clair_configs = configs.get("clair") or {}
     config_dict['clair_db'] = 'postgres'
     config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12
-    config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or ''
-    config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or ''
-    config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry'
 
     # Chart configs
     chart_configs = configs.get("chart") or {}
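The loop replaces the Clair-only proxy keys removed above with one `<component>_http_proxy` / `_https_proxy` / `_no_proxy` triple per entry in proxy.components, so the same settings now fan out to core, jobservice and Clair alike. A small sketch with hypothetical harbor.yml values:

    proxy_config = {'http_proxy': 'http://proxy.local:3128', 'components': ['core', 'clair']}
    config_dict = {}
    for component in proxy_config.get('components') or []:
        config_dict[component + '_http_proxy'] = proxy_config.get('http_proxy') or ''
        config_dict[component + '_https_proxy'] = proxy_config.get('https_proxy') or ''
        config_dict[component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry'
    # config_dict now holds core_http_proxy, clair_http_proxy, core_no_proxy, and so on.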
@@ -179,18 +198,34 @@ def parse_yaml_config(config_file_path):
     config_dict['max_job_workers'] = js_config["max_job_workers"]
     config_dict['jobservice_secret'] = generate_random_string(16)
 
+    # notification config
+    notification_config = configs.get('notification') or {}
+    config_dict['notification_webhook_job_max_retry'] = notification_config["webhook_job_max_retry"]
+
     # Log configs
     allowed_levels = ['debug', 'info', 'warning', 'error', 'fatal']
     log_configs = configs.get('log') or {}
-    config_dict['log_location'] = log_configs["location"]
-    config_dict['log_rotate_count'] = log_configs["rotate_count"]
-    config_dict['log_rotate_size'] = log_configs["rotate_size"]
     log_level = log_configs['level']
     if log_level not in allowed_levels:
         raise Exception('log level must be one of debug, info, warning, error, fatal')
     config_dict['log_level'] = log_level.lower()
 
+    # parse local log related configs
+    local_logs = log_configs.get('local') or {}
+    if local_logs:
+        config_dict['log_location'] = local_logs.get('location') or '/var/log/harbor'
+        config_dict['log_rotate_count'] = local_logs.get('rotate_count') or 50
+        config_dict['log_rotate_size'] = local_logs.get('rotate_size') or '200M'
+
+    # parse external log endpoint related configs
+    if log_configs.get('external_endpoint'):
+        config_dict['log_external'] = True
+        config_dict['log_ep_protocol'] = log_configs['external_endpoint']['protocol']
+        config_dict['log_ep_host'] = log_configs['external_endpoint']['host']
+        config_dict['log_ep_port'] = log_configs['external_endpoint']['port']
+    else:
+        config_dict['log_external'] = False
+
     # external DB, optional, if external_db enabled, it will cover the database config
     external_db_configs = configs.get('external_database') or {}
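With this change harbor.yml's log section has two branches: a local one (rotation settings, with defaults) and an optional external_endpoint one, which is what the new validate() checks guard. A sketch of the two shapes the parser accepts, with hypothetical values:

    # local logging: rotation settings with fallbacks
    log_configs = {'level': 'info', 'local': {'rotate_count': 10}}
    # -> log_external False, log_location '/var/log/harbor', log_rotate_count 10, log_rotate_size '200M'

    # external syslog endpoint: no local rotation keys are set at all
    log_configs = {'level': 'info',
                   'external_endpoint': {'protocol': 'udp', 'host': 'syslog.example.com', 'port': 514}}
    # -> log_external True, log_ep_protocol 'udp', log_ep_host 'syslog.example.com', log_ep_port 514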
@@ -202,7 +237,7 @@ def parse_yaml_config(config_file_path):
     config_dict['harbor_db_username'] = external_db_configs['harbor']['username']
     config_dict['harbor_db_password'] = external_db_configs['harbor']['password']
     config_dict['harbor_db_sslmode'] = external_db_configs['harbor']['ssl_mode']
-    # clari db
+    # clair db
     config_dict['clair_db_host'] = external_db_configs['clair']['host']
     config_dict['clair_db_port'] = external_db_configs['clair']['port']
     config_dict['clair_db_name'] = external_db_configs['clair']['db_name']
@@ -261,4 +296,4 @@ def parse_yaml_config(config_file_path):
     # UAA configs
     config_dict['uaa'] = configs.get('uaa') or {}
 
     return config_dict
@@ -1,7 +1,7 @@
 import shutil, os
 
 from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir, generate_random_string
+from utils.misc import prepare_dir, generate_random_string
 from utils.jinja import render_jinja
 
 core_config_dir = os.path.join(config_dir, "core", "certificates")
@@ -33,7 +33,7 @@ def prepare_core(config_dict, with_notary, with_clair, with_chartmuseum):
     copy_core_config(core_conf_template_path, core_conf)
 
 def prepare_core_config_dir():
-    prepare_config_dir(core_config_dir)
+    prepare_dir(core_config_dir)
 
 def copy_core_config(core_templates_path, core_config_path):
     shutil.copyfile(core_templates_path, core_config_path)
@@ -1,20 +1,18 @@
 import os
 
-from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir
+from g import config_dir, templates_dir, data_dir, PG_UID, PG_GID
+from utils.misc import prepare_dir
 from utils.jinja import render_jinja
 
 db_config_dir = os.path.join(config_dir, "db")
 db_env_template_path = os.path.join(templates_dir, "db", "env.jinja")
 db_conf_env = os.path.join(config_dir, "db", "env")
+database_data_path = os.path.join(data_dir, 'database')
 
 def prepare_db(config_dict):
-    prepare_db_config_dir()
+    prepare_dir(database_data_path, uid=PG_UID, gid=PG_GID)
+    prepare_dir(db_config_dir)
     render_jinja(
         db_env_template_path,
         db_conf_env,
         harbor_db_password=config_dict['harbor_db_password'])
-
-def prepare_db_config_dir():
-    prepare_config_dir(db_config_dir)
@@ -13,8 +13,8 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
     VERSION_TAG = versions.get('VERSION_TAG') or 'dev'
     REGISTRY_VERSION = versions.get('REGISTRY_VERSION') or 'v2.7.1'
     NOTARY_VERSION = versions.get('NOTARY_VERSION') or 'v0.6.1'
-    CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.7'
-    CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.8.1'
+    CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.9'
+    CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.9.0'
 
     rendering_variables = {
         'version': VERSION_TAG,
@@ -33,17 +33,25 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
         'with_chartmuseum': with_chartmuseum
     }
 
+    # for gcs
     storage_config = configs.get('storage_provider_config') or {}
     if storage_config.get('keyfile') and configs['storage_provider_name'] == 'gcs':
         rendering_variables['gcs_keyfile'] = storage_config['keyfile']
 
+    # for http
     if configs['protocol'] == 'https':
         rendering_variables['cert_key_path'] = configs['cert_key_path']
         rendering_variables['cert_path'] = configs['cert_path']
         rendering_variables['https_port'] = configs['https_port']
 
+    # for uaa
     uaa_config = configs.get('uaa') or {}
     if uaa_config.get('ca_file'):
         rendering_variables['uaa_ca_file'] = uaa_config['ca_file']
 
+    # for log
+    log_ep_host = configs.get('log_ep_host')
+    if log_ep_host:
+        rendering_variables['external_log_endpoint'] = True
+
     render_jinja(docker_compose_template_path, docker_compose_yml_path, **rendering_variables)
@@ -1,7 +1,7 @@
 import os
 
 from g import config_dir, DEFAULT_GID, DEFAULT_UID, templates_dir
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
 from utils.jinja import render_jinja
 
 job_config_dir = os.path.join(config_dir, "jobservice")
@@ -10,15 +10,14 @@ job_service_conf_env = os.path.join(config_dir, "jobservice", "env")
 job_service_conf_template_path = os.path.join(templates_dir, "jobservice", "config.yml.jinja")
 jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
 
-
 def prepare_job_service(config_dict):
-    prepare_config_dir(job_config_dir)
+    prepare_dir(job_config_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
 
     log_level = config_dict['log_level'].upper()
 
     # Job log is stored in data dir
     job_log_dir = os.path.join('/data', "job_logs")
-    prepare_config_dir(job_log_dir)
+    prepare_dir(job_log_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
     # Render Jobservice env
     render_jinja(
         job_service_env_template_path,
@@ -33,4 +32,4 @@ def prepare_job_service(config_dict):
         gid=DEFAULT_GID,
         max_job_workers=config_dict['max_job_workers'],
         redis_url=config_dict['redis_url_js'],
         level=log_level)
@@ -1,15 +1,21 @@
 import os
 
 from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
 from utils.jinja import render_jinja
 
 log_config_dir = os.path.join(config_dir, "log")
 
+# logrotate config file
 logrotate_template_path = os.path.join(templates_dir, "log", "logrotate.conf.jinja")
 log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
 
+# syslog docker config file
+log_syslog_docker_template_path = os.path.join(templates_dir, 'log', 'rsyslog_docker.conf.jinja')
+log_syslog_docker_config = os.path.join(config_dir, 'log', 'rsyslog_docker.conf')
+
 def prepare_log_configs(config_dict):
-    prepare_config_dir(log_config_dir)
+    prepare_dir(log_config_dir)
 
     # Render Log config
     render_jinja(
@@ -17,4 +23,13 @@ def prepare_log_configs(config_dict):
         log_rotate_config,
         uid=DEFAULT_UID,
         gid=DEFAULT_GID,
         **config_dict)
+
+    # Render syslog docker config
+    render_jinja(
+        log_syslog_docker_template_path,
+        log_syslog_docker_config,
+        uid=DEFAULT_UID,
+        gid=DEFAULT_GID,
+        **config_dict
+    )
@@ -3,7 +3,7 @@ import string
 import random
 
 from g import DEFAULT_UID, DEFAULT_GID
+from pathlib import Path
 
 # To meet security requirement
 # By default it will change file mode to 0600, and make the owner of the file to 10000:10000
@@ -84,6 +84,26 @@ def prepare_config_dir(root, *name):
         os.makedirs(absolute_path)
     return absolute_path
 
+def prepare_dir(root: str, *args, **kwargs) -> str:
+    gid, uid = kwargs.get('gid'), kwargs.get('uid')
+    absolute_path = Path(os.path.join(root, *args))
+    if absolute_path.is_file():
+        raise Exception('Path exists and the type is regular file')
+    mode = kwargs.get('mode') or 0o755
+    absolute_path.mkdir(mode, parents=True, exist_ok=True)
+
+    # if uid or gid not None, then change the ownership of this dir
+    if not(gid is None and uid is None):
+        dir_uid, dir_gid = absolute_path.stat().st_uid, absolute_path.stat().st_gid
+        if uid is None:
+            uid = dir_uid
+        if gid is None:
+            gid = dir_gid
+        os.chown(absolute_path, uid, gid)
+
+    return str(absolute_path)
+
+
 def delfile(src):
     if os.path.isfile(src):
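Compared with prepare_config_dir, the new helper also fixes mode and ownership in one call, which is what lets the per-service modules above hand directories straight to non-root containers. A usage sketch (the path and ids below are illustrative, not from this commit):

    # Creates /data/redis (mode 0o755, parents included) unless it already exists,
    # raises if the path is a regular file, then chowns it to 999:999.
    redis_dir = prepare_dir('/data', 'redis', uid=999, gid=999)
    print(redis_dir)  # '/data/redis'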
@@ -2,11 +2,13 @@ import os, shutil
 from fnmatch import fnmatch
 from pathlib import Path
 
-from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir, mark_file
+from g import config_dir, templates_dir, host_root_dir, DEFAULT_GID, DEFAULT_UID, data_dir
+from utils.misc import prepare_dir, mark_file
 from utils.jinja import render_jinja
 from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH
 
+host_ngx_real_cert_dir = Path(os.path.join(data_dir, 'secret', 'cert'))
+
 nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
 nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d")
 nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja")
@@ -17,44 +19,76 @@ CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS = 'harbor.https.*.conf'
 CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP = 'harbor.http.*.conf'
 
 def prepare_nginx(config_dict):
-    prepare_config_dir(nginx_confd_dir)
+    prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
     render_nginx_template(config_dict)
 
+def prepare_nginx_certs(cert_key_path, cert_path):
+    """
+    Prepare the certs file with proper ownership
+    1. Remove nginx cert files in secret dir
+    2. Copy cert files on host filesystem to secret dir
+    3. Change the permission to 644 and ownership to 10000:10000
+    """
+    host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/')))
+    host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/')))
+
+    if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir():
+        shutil.rmtree(host_ngx_real_cert_dir)
+
+    os.makedirs(host_ngx_real_cert_dir, mode=0o755)
+    real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key')
+    real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt')
+    shutil.copy2(host_ngx_cert_key_path, real_key_path)
+    shutil.copy2(host_ngx_cert_path, real_crt_path)
+
+    os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
+    mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
+    mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
+
 def render_nginx_template(config_dict):
-    if config_dict['protocol'] == "https":
-        render_jinja(nginx_https_conf_template, nginx_conf,
+    """
+    1. render nginx config file through protocol
+    2. copy additional configs to cert.d dir
+    """
+    if config_dict['protocol'] == 'https':
+        prepare_nginx_certs(config_dict['cert_key_path'], config_dict['cert_path'])
+        render_jinja(
+            nginx_https_conf_template,
+            nginx_conf,
+            uid=DEFAULT_UID,
+            gid=DEFAULT_GID,
             ssl_cert=SSL_CERT_PATH,
             ssl_cert_key=SSL_CERT_KEY_PATH)
         location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
-        cert_dir = Path(os.path.join(config_dir, 'cert'))
-        ssl_key_path = Path(os.path.join(cert_dir, 'server.key'))
-        ssl_crt_path = Path(os.path.join(cert_dir, 'server.crt'))
-        cert_dir.mkdir(parents=True, exist_ok=True)
-        ssl_key_path.touch()
-        ssl_crt_path.touch()
     else:
         render_jinja(
             nginx_http_conf_template,
-            nginx_conf)
+            nginx_conf,
+            uid=DEFAULT_UID,
+            gid=DEFAULT_GID)
         location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
     copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)
 
-def add_additional_location_config(src, dst):
-    """
-    These conf files is used for user that wanna add additional customized locations to harbor proxy
-    :params src: source of the file
-    :params dst: destination file path
-    """
-    if not os.path.isfile(src):
-        return
-    print("Copying nginx configuration file {src} to {dst}".format(
-        src=src, dst=dst))
-    shutil.copy2(src, dst)
-    mark_file(dst, mode=0o644)
-
 def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern):
     if not os.path.exists(src_config_dir):
         return
+
+    def add_additional_location_config(src, dst):
+        """
+        These conf files are used by users who want to add additional customized locations to the harbor proxy
+        :params src: source of the file
+        :params dst: destination file path
+        """
+        if not os.path.isfile(src):
+            return
+        print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
+        shutil.copy2(src, dst)
+        mark_file(dst, mode=0o644)
+
     map(lambda filename: add_additional_location_config(
         os.path.join(src_config_dir, filename),
         os.path.join(dst_config_dir, filename)),
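One caveat worth flagging: under Python 3, map() returns a lazy iterator, so unless something consumes its result the copies never run. A plain loop, sketched below over the matching files (the iterable fed to map() is truncated in this view, so the filtering shown is an assumption), is the eager behaviour presumably intended:

    # Eager equivalent of the map(...) call above (sketch, not part of the commit);
    # fnmatch filters directory entries against the harbor.*.conf pattern.
    for filename in os.listdir(src_config_dir):
        if fnmatch(filename, filename_pattern):
            add_additional_location_config(
                os.path.join(src_config_dir, filename),
                os.path.join(dst_config_dir, filename))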
@@ -2,7 +2,7 @@ import os, shutil, pathlib
 from g import templates_dir, config_dir, root_crt_path, secret_key_dir, DEFAULT_UID, DEFAULT_GID
 from .cert import openssl_installed, create_cert, create_root_cert, get_alias
 from .jinja import render_jinja
-from .misc import mark_file, prepare_config_dir
+from .misc import mark_file, prepare_dir
 
 notary_template_dir = os.path.join(templates_dir, "notary")
 notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja")
@@ -20,12 +20,12 @@ notary_server_env_path = os.path.join(notary_config_dir, "server_env")
 
 def prepare_env_notary(nginx_config_dir):
-    notary_config_dir = prepare_config_dir(config_dir, "notary")
+    notary_config_dir = prepare_dir(config_dir, "notary")
     old_signer_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.crt'))
     old_signer_key_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.key'))
     old_signer_ca_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer-ca.crt'))
 
-    notary_secret_dir = prepare_config_dir('/secret/notary')
+    notary_secret_dir = prepare_dir('/secret/notary')
     signer_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.crt'))
     signer_key_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.key'))
     signer_ca_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer-ca.crt'))
@@ -72,9 +72,12 @@ def prepare_env_notary(nginx_config_dir):
 
     print("Copying nginx configuration file for notary")
-    shutil.copy2(
+
+    render_jinja(
         os.path.join(templates_dir, "nginx", "notary.upstream.conf.jinja"),
-        os.path.join(nginx_config_dir, "notary.upstream.conf"))
+        os.path.join(nginx_config_dir, "notary.upstream.conf"),
+        gid=DEFAULT_GID,
+        uid=DEFAULT_UID)
 
     mark_file(os.path.join(notary_secret_dir, "notary-signer.crt"))
     mark_file(os.path.join(notary_secret_dir, "notary-signer.key"))
@@ -88,6 +91,8 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa
     render_jinja(
         notary_server_nginx_config_template,
         os.path.join(nginx_config_dir, "notary.server.conf"),
+        gid=DEFAULT_GID,
+        uid=DEFAULT_UID,
         ssl_cert=ssl_cert_path,
         ssl_cert_key=ssl_cert_key_path)
9 make/photon/prepare/utils/redis.py Normal file
@@ -0,0 +1,9 @@
+import os
+
+from g import data_dir, REDIS_UID, REDIS_GID
+from utils.misc import prepare_dir
+
+redis_data_path = os.path.join(data_dir, 'redis')
+
+def prepare_redis(config_dict):
+    prepare_dir(redis_data_path, uid=REDIS_UID, gid=REDIS_GID)
@@ -1,7 +1,7 @@
 import os, copy
 
 from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
 from utils.jinja import render_jinja
 
 
@@ -9,9 +9,16 @@ registry_config_dir = os.path.join(config_dir, "registry")
 registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
 registry_conf = os.path.join(config_dir, "registry", "config.yml")
 
+levels_map = {
+    'debug': 'debug',
+    'info': 'info',
+    'warning': 'warn',
+    'error': 'error',
+    'fatal': 'fatal'
+}
+
 def prepare_registry(config_dict):
-    prepare_config_dir(registry_config_dir)
+    prepare_dir(registry_config_dir)
 
     storage_provider_info = get_storage_provider_info(
         config_dict['storage_provider_name'],
@@ -22,6 +29,7 @@ def prepare_registry(config_dict):
         registry_conf,
         uid=DEFAULT_UID,
         gid=DEFAULT_GID,
+        level=levels_map[config_dict['log_level']],
         storage_provider_info=storage_provider_info,
         **config_dict)
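The levels_map exists because Docker Registry spells one level differently from harbor.yml: Harbor's 'warning' must become 'warn' before it is rendered into the registry config, while the other levels pass through unchanged. For example:

    levels_map['warning']  # -> 'warn', the spelling the registry config expects
    levels_map['debug']    # -> 'debug'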
@@ -1,7 +1,7 @@
 import os, shutil
 
 from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
 from utils.jinja import render_jinja
 
 registryctl_config_dir = os.path.join(config_dir, "registryctl")
@@ -24,7 +24,7 @@ def prepare_registry_ctl(config_dict):
     copy_registry_ctl_conf(registryctl_config_template_path, registryctl_conf)
 
 def prepare_registry_ctl_config_dir():
-    prepare_config_dir(registryctl_config_dir)
+    prepare_dir(registryctl_config_dir)
 
 def copy_registry_ctl_conf(src, dst):
     shutil.copyfile(src, dst)
@@ -4,11 +4,12 @@ RUN tdnf install -y redis sudo
 
 VOLUME /var/lib/redis
 WORKDIR /var/lib/redis
-COPY ./make/photon/redis/docker-entrypoint.sh /usr/bin/
+COPY ./make/photon/redis/docker-healthcheck /usr/bin/
 COPY ./make/photon/redis/redis.conf /etc/redis.conf
-RUN chmod +x /usr/bin/docker-entrypoint.sh \
+RUN chmod +x /usr/bin/docker-healthcheck \
     && chown redis:redis /etc/redis.conf
-ENTRYPOINT ["docker-entrypoint.sh"]
+
+HEALTHCHECK CMD ["docker-healthcheck"]
+USER redis
 EXPOSE 6379
 CMD ["redis-server", "/etc/redis.conf"]
@@ -1,13 +0,0 @@
-#!/bin/sh
-set -e
-
-if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
-    set -- redis-server "$@"
-fi
-
-if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then
-    chown -R redis .
-    exec sudo -u redis "$@"
-fi
-
-exec "$@"
9 make/photon/redis/docker-healthcheck Normal file
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -eo pipefail
+
+if ping="$(redis-cli -h "127.0.0.1" ping)" && [ "$ping" = 'PONG' ]; then
+    exit 0
+fi
+
+exit 1
16 make/prepare
@@ -1,8 +1,8 @@
 #!/bin/bash
 set +e
 
-# If compling source code this dir is harbor's make dir
-# If install harbor via pacakge, this dir is harbor's root dir
+# If compiling source code this dir is harbor's make dir.
+# If installing harbor via package, this dir is harbor's root dir.
 if [[ -n "$HARBOR_BUNDLE_DIR" ]]; then
     harbor_prepare_path=$HARBOR_BUNDLE_DIR
 else
@@ -35,7 +35,7 @@ set -e
 # Copy harbor.yml to input dir
 if [[ ! "$1" =~ ^\-\- ]] && [ -f "$1" ]
 then
     cp $1 $input_dir/harbor.yml
 else
     cp ${harbor_prepare_path}/harbor.yml $input_dir/harbor.yml
 fi
@@ -45,10 +45,12 @@ secret_dir=${data_path}/secret
 config_dir=$harbor_prepare_path/common/config
 
 # Run prepare script
-docker run --rm -v $input_dir:/input \
-    -v $harbor_prepare_path:/compose_location \
-    -v $config_dir:/config \
-    -v $secret_dir:/secret \
+docker run --rm -v $input_dir:/input:z \
+    -v $data_path:/data:z \
+    -v $harbor_prepare_path:/compose_location:z \
+    -v $config_dir:/config:z \
+    -v $secret_dir:/secret:z \
+    -v /:/hostfs:z \
     goharbor/prepare:dev $@
 
 echo "Clean up the input dir"
802 src/Gopkg.lock generated
@@ -1,802 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[The remaining 801 deleted lines of this generated dep lock file, the [[projects]] entries with their digests, package lists, revisions and versions, are not reproduced here.]
"storage/v1alpha1",
|
|
||||||
"storage/v1beta1",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "5cb15d34447165a97c76ed5a60e4e99c8a01ecfe"
|
|
||||||
version = "kubernetes-1.13.4"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:d0d43cf61b49d2750351759e1d220134ab7731db608b6716dc4ed792a493027d"
|
|
||||||
name = "k8s.io/apimachinery"
|
|
||||||
packages = [
|
|
||||||
"pkg/api/errors",
|
|
||||||
"pkg/api/resource",
|
|
||||||
"pkg/apis/meta/v1",
|
|
||||||
"pkg/apis/meta/v1/unstructured",
|
|
||||||
"pkg/conversion",
|
|
||||||
"pkg/conversion/queryparams",
|
|
||||||
"pkg/fields",
|
|
||||||
"pkg/labels",
|
|
||||||
"pkg/runtime",
|
|
||||||
"pkg/runtime/schema",
|
|
||||||
"pkg/runtime/serializer",
|
|
||||||
"pkg/runtime/serializer/json",
|
|
||||||
"pkg/runtime/serializer/protobuf",
|
|
||||||
"pkg/runtime/serializer/recognizer",
|
|
||||||
"pkg/runtime/serializer/streaming",
|
|
||||||
"pkg/runtime/serializer/versioning",
|
|
||||||
"pkg/selection",
|
|
||||||
"pkg/types",
|
|
||||||
"pkg/util/clock",
|
|
||||||
"pkg/util/errors",
|
|
||||||
"pkg/util/framer",
|
|
||||||
"pkg/util/intstr",
|
|
||||||
"pkg/util/json",
|
|
||||||
"pkg/util/net",
|
|
||||||
"pkg/util/runtime",
|
|
||||||
"pkg/util/sets",
|
|
||||||
"pkg/util/validation",
|
|
||||||
"pkg/util/validation/field",
|
|
||||||
"pkg/util/wait",
|
|
||||||
"pkg/util/yaml",
|
|
||||||
"pkg/version",
|
|
||||||
"pkg/watch",
|
|
||||||
"third_party/forked/golang/reflect",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "f534d624797b270e5e46104dc7e2c2d61edbb85d"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:131682c26796b64f0abb77ac3d85525712706fde0b085aaa7b6d10b4398167cc"
|
|
||||||
name = "k8s.io/client-go"
|
|
||||||
packages = [
|
|
||||||
"kubernetes/scheme",
|
|
||||||
"pkg/apis/clientauthentication",
|
|
||||||
"pkg/apis/clientauthentication/v1alpha1",
|
|
||||||
"pkg/apis/clientauthentication/v1beta1",
|
|
||||||
"pkg/version",
|
|
||||||
"plugin/pkg/client/auth/exec",
|
|
||||||
"rest",
|
|
||||||
"rest/watch",
|
|
||||||
"tools/clientcmd/api",
|
|
||||||
"tools/metrics",
|
|
||||||
"transport",
|
|
||||||
"util/cert",
|
|
||||||
"util/connrotation",
|
|
||||||
"util/flowcontrol",
|
|
||||||
"util/homedir",
|
|
||||||
"util/integer",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
|
|
||||||
version = "v8.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
digest = "1:1076dbb6a69b965ccfda2a06a04e5038db78eff586f74b5daf4a41444e6f6077"
|
|
||||||
name = "k8s.io/helm"
|
|
||||||
packages = [
|
|
||||||
"cmd/helm/search",
|
|
||||||
"pkg/chartutil",
|
|
||||||
"pkg/getter",
|
|
||||||
"pkg/helm/environment",
|
|
||||||
"pkg/helm/helmpath",
|
|
||||||
"pkg/ignore",
|
|
||||||
"pkg/plugin",
|
|
||||||
"pkg/proto/hapi/chart",
|
|
||||||
"pkg/proto/hapi/version",
|
|
||||||
"pkg/provenance",
|
|
||||||
"pkg/repo",
|
|
||||||
"pkg/sympath",
|
|
||||||
"pkg/tlsutil",
|
|
||||||
"pkg/urlutil",
|
|
||||||
"pkg/version",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "20adb27c7c5868466912eebdf6664e7390ebe710"
|
|
||||||
version = "v2.9.1"
|
|
||||||
|
|
||||||
[solve-meta]
|
|
||||||
analyzer-name = "dep"
|
|
||||||
analyzer-version = 1
|
|
||||||
input-imports = [
|
|
||||||
"github.com/Masterminds/semver",
|
|
||||||
"github.com/astaxie/beego",
|
|
||||||
"github.com/astaxie/beego/cache",
|
|
||||||
"github.com/astaxie/beego/cache/redis",
|
|
||||||
"github.com/astaxie/beego/context",
|
|
||||||
"github.com/astaxie/beego/orm",
|
|
||||||
"github.com/astaxie/beego/session",
|
|
||||||
"github.com/astaxie/beego/session/redis",
|
|
||||||
"github.com/astaxie/beego/validation",
|
|
||||||
"github.com/beego/i18n",
|
|
||||||
"github.com/bmatcuk/doublestar",
|
|
||||||
"github.com/casbin/casbin",
|
|
||||||
"github.com/casbin/casbin/model",
|
|
||||||
"github.com/casbin/casbin/persist",
|
|
||||||
"github.com/casbin/casbin/util",
|
|
||||||
"github.com/coreos/go-oidc",
|
|
||||||
"github.com/dghubble/sling",
|
|
||||||
"github.com/dgrijalva/jwt-go",
|
|
||||||
"github.com/docker/distribution",
|
|
||||||
"github.com/docker/distribution/health",
|
|
||||||
"github.com/docker/distribution/manifest/manifestlist",
|
|
||||||
"github.com/docker/distribution/manifest/schema1",
|
|
||||||
"github.com/docker/distribution/manifest/schema2",
|
|
||||||
"github.com/docker/distribution/reference",
|
|
||||||
"github.com/docker/distribution/registry/auth/token",
|
|
||||||
"github.com/docker/distribution/registry/client/auth/challenge",
|
|
||||||
"github.com/docker/libtrust",
|
|
||||||
"github.com/garyburd/redigo/redis",
|
|
||||||
"github.com/ghodss/yaml",
|
|
||||||
"github.com/go-sql-driver/mysql",
|
|
||||||
"github.com/gocraft/work",
|
|
||||||
"github.com/golang-migrate/migrate",
|
|
||||||
"github.com/golang-migrate/migrate/database/postgres",
|
|
||||||
"github.com/golang-migrate/migrate/source/file",
|
|
||||||
"github.com/gomodule/redigo/redis",
|
|
||||||
"github.com/gorilla/handlers",
|
|
||||||
"github.com/gorilla/mux",
|
|
||||||
"github.com/lib/pq",
|
|
||||||
"github.com/opencontainers/go-digest",
|
|
||||||
"github.com/pkg/errors",
|
|
||||||
"github.com/robfig/cron",
|
|
||||||
"github.com/stretchr/testify/assert",
|
|
||||||
"github.com/stretchr/testify/mock",
|
|
||||||
"github.com/stretchr/testify/require",
|
|
||||||
"github.com/stretchr/testify/suite",
|
|
||||||
"github.com/theupdateframework/notary",
|
|
||||||
"github.com/theupdateframework/notary/client",
|
|
||||||
"github.com/theupdateframework/notary/trustpinning",
|
|
||||||
"github.com/theupdateframework/notary/tuf/data",
|
|
||||||
"golang.org/x/crypto/pbkdf2",
|
|
||||||
"golang.org/x/oauth2",
|
|
||||||
"golang.org/x/oauth2/clientcredentials",
|
|
||||||
"gopkg.in/ldap.v2",
|
|
||||||
"gopkg.in/yaml.v2",
|
|
||||||
"k8s.io/api/authentication/v1beta1",
|
|
||||||
"k8s.io/apimachinery/pkg/apis/meta/v1",
|
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema",
|
|
||||||
"k8s.io/apimachinery/pkg/runtime/serializer",
|
|
||||||
"k8s.io/client-go/kubernetes/scheme",
|
|
||||||
"k8s.io/client-go/rest",
|
|
||||||
"k8s.io/helm/cmd/helm/search",
|
|
||||||
"k8s.io/helm/pkg/chartutil",
|
|
||||||
"k8s.io/helm/pkg/proto/hapi/chart",
|
|
||||||
"k8s.io/helm/pkg/repo",
|
|
||||||
]
|
|
||||||
solver-name = "gps-cdcl"
|
|
||||||
solver-version = 1
|
|
137
src/Gopkg.toml
@@ -1,137 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#   name = "github.com/x/y"
-#   version = "2.4.0"
-
-ignored = ["github.com/goharbor/harbor/tests*"]
-
-[prune]
-  go-tests = true
-  unused-packages = true
-
-[[constraint]]
-  name = "github.com/astaxie/beego"
-  version = "=1.9.0"
-
-[[constraint]]
-  name = "github.com/casbin/casbin"
-  version = "=1.7.0"
-
-[[constraint]]
-  name = "github.com/dghubble/sling"
-  version = "=1.1.0"
-
-[[constraint]]
-  name = "github.com/dgrijalva/jwt-go"
-  version = "=3.0.0"
-
-[[constraint]]
-  name = "github.com/docker/distribution"
-  version = "=2.7.1"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/docker/libtrust"
-
-[[constraint]]
-  name = "github.com/go-sql-driver/mysql"
-  version = "=1.3.0"
-
-[[override]]
-  name = "github.com/mattn/go-sqlite3"
-  version = "=1.6.0"
-
-[[constraint]]
-  name = "github.com/opencontainers/go-digest"
-  version = "=1.0.0-rc0"
-
-[[constraint]]
-  name = "gopkg.in/ldap.v2"
-  version = "=2.5.0"
-
-[[constraint]]
-  name = "github.com/stretchr/testify"
-  version = "=1.3.0"
-
-[[constraint]]
-  name = "github.com/gorilla/handlers"
-  version = "=1.3.0"
-
-[[constraint]]
-  name = "github.com/gorilla/mux"
-  version = "=1.6.0"
-
-[[override]]
-  name = "github.com/Sirupsen/logrus"
-  version = "=1.0.5"
-
-[[override]]
-  name = "github.com/gorilla/context"
-  version = "=1.1"
-
-[[override]]
-  name = "github.com/garyburd/redigo"
-  version = "=1.6.0"
-
-[[constraint]]
-  name = "github.com/golang-migrate/migrate"
-  version = "=3.3.0"
-
-[[constraint]]
-  name = "k8s.io/helm"
-  version = "2.9.1"
-
-[[constraint]]
-  name = "github.com/ghodss/yaml"
-  version = "=1.0.0"
-
-[[constraint]]
-  name = "github.com/Masterminds/semver"
-  version = "=1.4.2"
-
-[[constraint]]
-  name = "github.com/gocraft/work"
-  version = "=0.5.1"
-
-[[constraint]]
-  name = "github.com/robfig/cron"
-  version = "=1.0"
-
-[[constraint]]
-  name = "github.com/coreos/go-oidc"
-  version = "=2.0.0"
-
-[[constraint]]
-  name = "gopkg.in/yaml.v2"
-  version = "=2.1.1"
-
-[[constraint]]
-  name = "k8s.io/api"
-  version = "kubernetes-1.13.4"
-
-[[constraint]]
-  name = "github.com/bmatcuk/doublestar"
-  version = "=1.1.1"
-
-[[constraint]]
-  name = "github.com/pkg/errors"
-  version = "=0.8.1"
-
-[[constraint]]
-  name = "github.com/docker/notary"
-  version = "=0.6.1"
@@ -1,16 +1,16 @@
 package chartserver
 
 import (
-	"errors"
 	"fmt"
-	commonhttp "github.com/goharbor/harbor/src/common/http"
-	hlog "github.com/goharbor/harbor/src/common/utils/log"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
 	"time"
 
+	commonhttp "github.com/goharbor/harbor/src/common/http"
+	"github.com/pkg/errors"
 )
 
 const (
@@ -49,11 +49,13 @@ func NewChartClient(credential *Credential) *ChartClient { // Create http client
 func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
 	response, err := cc.sendRequest(addr, http.MethodGet, nil)
 	if err != nil {
+		err = errors.Wrap(err, "get content failed")
 		return nil, err
 	}
 
 	content, err := ioutil.ReadAll(response.Body)
 	if err != nil {
+		err = errors.Wrap(err, "Read response body error")
 		return nil, err
 	}
 	defer response.Body.Close()
@@ -61,6 +63,7 @@ func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
 	if response.StatusCode != http.StatusOK {
 		text, err := extractError(content)
 		if err != nil {
+			err = errors.Wrap(err, "Extract content error failed")
 			return nil, err
 		}
 		return nil, &commonhttp.Error{
@@ -106,7 +109,8 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) (
 
 	fullURI, err := url.Parse(addr)
 	if err != nil {
-		return nil, fmt.Errorf("invalid url: %s", err.Error())
+		err = errors.Wrap(err, "Invalid url")
+		return nil, err
 	}
 
 	request, err := http.NewRequest(method, addr, body)
@@ -121,7 +125,7 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) (
 
 	response, err := cc.httpClient.Do(request)
 	if err != nil {
-		hlog.Errorf("%s '%s' failed with error: %s", method, fullURI.Path, err)
+		err = errors.Wrap(err, fmt.Sprintf("send request %s %s failed", method, fullURI.Path))
 		return nil, err
 	}
 
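These hunks swap the standard library's errors package for github.com/pkg/errors so each failure is annotated with context while the root cause stays recoverable. A minimal sketch of the pattern, not part of the commit (the fetch helper and the address are invented for illustration):

// Example (illustrative only, not part of this commit).
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// fetch stands in for cc.sendRequest above; it always fails here.
func fetch(addr string) error {
	return errors.New("connection refused")
}

func main() {
	err := fetch("http://chartmuseum:9999")
	if err != nil {
		// Wrap keeps the original error as the cause and prefixes context.
		err = errors.Wrap(err, "get content failed")
	}
	fmt.Println(err)                // get content failed: connection refused
	fmt.Println(errors.Cause(err)) // connection refused
}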
@@ -7,6 +7,7 @@ import (
 	"os"
 
 	hlog "github.com/goharbor/harbor/src/common/utils/log"
+	"github.com/justinas/alice"
 )
 
 const (
@@ -42,7 +43,7 @@ type Controller struct {
 }
 
 // NewController is constructor of the chartserver.Controller
-func NewController(backendServer *url.URL) (*Controller, error) {
+func NewController(backendServer *url.URL, chains ...*alice.Chain) (*Controller, error) {
 	if backendServer == nil {
 		return nil, errors.New("failed to create chartserver.Controller: backend sever address is required")
 	}
@@ -68,7 +69,7 @@ func NewController(backendServer *url.URL) (*Controller, error) {
 	return &Controller{
 		backendServerAddress: backendServer,
 		// Use customized reverse proxy
-		trafficProxy: NewProxyEngine(backendServer, cred),
+		trafficProxy: NewProxyEngine(backendServer, cred, chains...),
 		// Initialize chart operator for use
 		chartOperator: &ChartOperator{},
 		// Create http client with customized timeouts
@@ -2,19 +2,20 @@ package chartserver
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"os"
 	"strings"
 
 	"github.com/ghodss/yaml"
+	commonhttp "github.com/goharbor/harbor/src/common/http"
+	"github.com/goharbor/harbor/src/common/utils/log"
 	"github.com/goharbor/harbor/src/replication"
 	rep_event "github.com/goharbor/harbor/src/replication/event"
 	"github.com/goharbor/harbor/src/replication/model"
+	"github.com/pkg/errors"
 	helm_repo "k8s.io/helm/pkg/repo"
-
-	"os"
-
-	"github.com/goharbor/harbor/src/common/utils/log"
 )
 
 // ListCharts gets the chart list under the namespace
@@ -68,11 +69,21 @@ func (c *Controller) DeleteChartVersion(namespace, chartName, version string) er
 		return errors.New("invalid chart for deleting")
 	}
 
-	url := fmt.Sprintf("%s/%s/%s", c.APIPrefix(namespace), chartName, version)
+	url := fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", namespace, chartName, version)
+	req, _ := http.NewRequest(http.MethodDelete, url, nil)
+	w := httptest.NewRecorder()
 
-	err := c.apiClient.DeleteContent(url)
-	if err != nil {
-		return err
+	c.trafficProxy.ServeHTTP(w, req)
+	if w.Code != http.StatusOK {
+		text, err := extractError(w.Body.Bytes())
+		if err != nil {
+			return err
+		}
+		return &commonhttp.Error{
+			Code:    w.Code,
+			Message: text,
+		}
 	}
 
 	// send notification to replication handler
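The rewritten DeleteChartVersion no longer goes through a dedicated API client; it synthesizes an http.Request and dispatches it through the traffic proxy in-process, capturing the outcome with httptest.NewRecorder. A standalone sketch of that dispatch technique (the handler and path are stand-ins, not Harbor's proxy):

// Example (illustrative only, not part of this commit).
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stand-in handler; in the hunk above this role is played by c.trafficProxy.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodDelete {
			w.WriteHeader(http.StatusOK)
			return
		}
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	})

	req, _ := http.NewRequest(http.MethodDelete, "/api/chartrepo/library/charts/demo/1.0.0", nil)
	w := httptest.NewRecorder() // records status code and body in memory

	h.ServeHTTP(w, req) // no network round trip: the handler runs in-process

	fmt.Println(w.Code) // 200
}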
@@ -17,6 +17,7 @@ import (
 	hlog "github.com/goharbor/harbor/src/common/utils/log"
 	"github.com/goharbor/harbor/src/replication"
 	rep_event "github.com/goharbor/harbor/src/replication/event"
+	"github.com/justinas/alice"
 )
 
 const (
@@ -36,20 +37,29 @@ type ProxyEngine struct {
 	backend *url.URL
 
 	// Use go reverse proxy as engine
-	engine *httputil.ReverseProxy
+	engine http.Handler
 }
 
 // NewProxyEngine is constructor of NewProxyEngine
-func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine {
+func NewProxyEngine(target *url.URL, cred *Credential, chains ...*alice.Chain) *ProxyEngine {
+	var engine http.Handler
+
+	engine = &httputil.ReverseProxy{
+		ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile),
+		Director: func(req *http.Request) {
+			director(target, cred, req)
+		},
+		ModifyResponse: modifyResponse,
+	}
+
+	if len(chains) > 0 {
+		hlog.Info("New chart server traffic proxy with middlewares")
+		engine = chains[0].Then(engine)
+	}
+
 	return &ProxyEngine{
 		backend: target,
-		engine: &httputil.ReverseProxy{
-			ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile),
-			Director: func(req *http.Request) {
-				director(target, cred, req)
-			},
-			ModifyResponse: modifyResponse,
-		},
+		engine:  engine,
 	}
 }
 
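ProxyEngine.engine is widened from *httputil.ReverseProxy to http.Handler so an optional alice.Chain of middlewares can be layered in front of the proxy. A minimal sketch of that composition (the logging middleware and target address are invented for illustration; Harbor would pass in its own chain):

// Example (illustrative only, not part of this commit).
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"

	"github.com/justinas/alice"
)

// logging is a hypothetical middleware constructor.
func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s %s", r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	target, _ := url.Parse("http://chartmuseum:9999")
	var engine http.Handler = httputil.NewSingleHostReverseProxy(target)

	// alice.Chain.Then wraps the proxy while preserving the http.Handler type.
	engine = alice.New(logging).Then(engine)

	log.Fatal(http.ListenAndServe(":8080", engine))
}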
@@ -20,12 +20,11 @@ import (
 	"net/http"
 	"strconv"
 
+	"github.com/astaxie/beego"
 	"github.com/astaxie/beego/validation"
 	commonhttp "github.com/goharbor/harbor/src/common/http"
 	"github.com/goharbor/harbor/src/common/utils/log"
-
-	"errors"
-	"github.com/astaxie/beego"
+	"github.com/pkg/errors"
 )
 
 const (
@@ -210,12 +210,14 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database {
 	return &models.Database{
 		Type: c.Get(common.DatabaseType).GetString(),
 		PostGreSQL: &models.PostGreSQL{
 			Host:     c.Get(common.PostGreSQLHOST).GetString(),
 			Port:     c.Get(common.PostGreSQLPort).GetInt(),
 			Username: c.Get(common.PostGreSQLUsername).GetString(),
 			Password: c.Get(common.PostGreSQLPassword).GetString(),
 			Database: c.Get(common.PostGreSQLDatabase).GetString(),
 			SSLMode:  c.Get(common.PostGreSQLSSLMode).GetString(),
+			MaxIdleConns: c.Get(common.PostGreSQLMaxIdleConns).GetInt(),
+			MaxOpenConns: c.Get(common.PostGreSQLMaxOpenConns).GetInt(),
 		},
 	}
 }
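The new MaxIdleConns/MaxOpenConns settings ultimately map onto Go's database/sql connection-pool knobs. A sketch of how such values are typically applied (the DSN and numbers are placeholders, not Harbor's wiring):

// Example (illustrative only, not part of this commit).
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // postgres driver, already a Harbor dependency
)

func main() {
	db, err := sql.Open("postgres",
		"postgres://postgres:secret@127.0.0.1:5432/registry?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.SetMaxIdleConns(2) // matches the POSTGRESQL_MAX_IDLE_CONNS default above
	db.SetMaxOpenConns(0) // 0 means no limit in database/sql
}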
@@ -47,6 +47,7 @@ const (
 	HTTPAuthGroup = "http_auth"
 	OIDCGroup     = "oidc"
 	DatabaseGroup = "database"
+	QuotaGroup    = "quota"
 	// Put all config items do not belong a existing group into basic
 	BasicGroup = "basic"
 	ClairGroup = "clair"
@@ -74,6 +75,7 @@ var (
 	{Name: common.ClairURL, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_URL", DefaultValue: "http://clair:6060", ItemType: &StringType{}, Editable: false},
 
 	{Name: common.CoreURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_URL", DefaultValue: "http://core:8080", ItemType: &StringType{}, Editable: false},
+	{Name: common.CoreLocalURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_LOCAL_URL", DefaultValue: "http://127.0.0.1:8080", ItemType: &StringType{}, Editable: false},
 	{Name: common.DatabaseType, Scope: SystemScope, Group: BasicGroup, EnvKey: "DATABASE_TYPE", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
 
 	{Name: common.EmailFrom, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_FROM", DefaultValue: "admin <sample_admin@mydomain.com>", ItemType: &StringType{}, Editable: false},
@@ -91,7 +93,7 @@ var (
 	{Name: common.LDAPBaseDN, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_BASE_DN", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
 	{Name: common.LDAPFilter, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
 	{Name: common.LDAPGroupBaseDN, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_BASE_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
-	{Name: common.LdapGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
+	{Name: common.LDAPGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
 	{Name: common.LDAPGroupAttributeName, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_GID", DefaultValue: "", ItemType: &StringType{}, Editable: false},
 	{Name: common.LDAPGroupSearchFilter, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
 	{Name: common.LDAPGroupSearchScope, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false},
@@ -114,6 +116,8 @@ var (
 	{Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
 	{Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
 	{Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
+	{Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false},
+	{Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false},
 
 	{Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false},
 	{Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
@@ -133,7 +137,7 @@ var (
 	{Name: common.HTTPAuthProxyEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
 	{Name: common.HTTPAuthProxyTokenReviewEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
 	{Name: common.HTTPAuthProxyVerifyCert, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "true", ItemType: &BoolType{}},
-	{Name: common.HTTPAuthProxyAlwaysOnboard, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}},
+	{Name: common.HTTPAuthProxySkipSearch, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}},
 
 	{Name: common.OIDCName, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
 	{Name: common.OIDCEndpoint, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
@@ -147,5 +151,10 @@ var (
 	{Name: common.WithNotary, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
 	// the unit of expiration is minute, 43200 minutes = 30 days
 	{Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
+	{Name: common.NotificationEnable, Scope: UserScope, Group: BasicGroup, EnvKey: "NOTIFICATION_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
+
+	{Name: common.QuotaPerProjectEnable, Scope: UserScope, Group: QuotaGroup, EnvKey: "QUOTA_PER_PROJECT_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
+	{Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
+	{Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
 	}
 )
@@ -18,9 +18,10 @@ package metadata
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/goharbor/harbor/src/common"
 	"strconv"
 	"strings"
+
+	"github.com/goharbor/harbor/src/common"
 )
 
 // Type - Use this interface to define and encapsulate the behavior of validation and transformation
@@ -186,3 +187,21 @@ func (t *MapType) get(str string) (interface{}, error) {
 	err := json.Unmarshal([]byte(str), &result)
 	return result, err
 }
+
+// QuotaType ...
+type QuotaType struct {
+	Int64Type
+}
+
+func (t *QuotaType) validate(str string) error {
+	val, err := strconv.ParseInt(str, 10, 64)
+	if err != nil {
+		return err
+	}
+
+	if val <= 0 && val != -1 {
+		return fmt.Errorf("quota value should be -1 or great than zero")
+	}
+
+	return nil
+}
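The new QuotaType accepts -1 (unlimited) or any positive integer and rejects everything else. A quick standalone check of that rule, mirroring the unexported validate method above (the helper name is invented):

// Example (illustrative only, not part of this commit).
package main

import (
	"fmt"
	"strconv"
)

// validQuota reimplements the rule from QuotaType.validate for illustration.
func validQuota(s string) bool {
	v, err := strconv.ParseInt(s, 10, 64)
	return err == nil && (v == -1 || v > 0)
}

func main() {
	fmt.Println(validQuota("-1")) // true: unlimited
	fmt.Println(validQuota("10")) // true
	fmt.Println(validQuota("0"))  // false
	fmt.Println(validQuota("x"))  // false
}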
@@ -40,7 +40,7 @@ func (d *Database) Load() (map[string]interface{}, error) {
 
 		itemMetadata, ok := metadata.Instance().GetByName(item.Key)
 		if !ok {
-			log.Warningf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err)
+			log.Debugf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err)
 			continue
 		}
 		if itemMetadata.Scope == metadata.SystemScope {
18
src/common/const.go
Normal file → Executable file
@@ -53,8 +53,11 @@ const (
 	PostGreSQLPassword = "postgresql_password"
 	PostGreSQLDatabase = "postgresql_database"
 	PostGreSQLSSLMode  = "postgresql_sslmode"
+	PostGreSQLMaxIdleConns = "postgresql_max_idle_conns"
+	PostGreSQLMaxOpenConns = "postgresql_max_open_conns"
 	SelfRegistration = "self_registration"
 	CoreURL          = "core_url"
+	CoreLocalURL     = "core_local_url"
 	JobServiceURL    = "jobservice_url"
 	LDAPURL          = "ldap_url"
 	LDAPSearchDN     = "ldap_search_dn"
@@ -100,7 +103,7 @@ const (
 	HTTPAuthProxyEndpoint            = "http_authproxy_endpoint"
 	HTTPAuthProxyTokenReviewEndpoint = "http_authproxy_tokenreview_endpoint"
 	HTTPAuthProxyVerifyCert          = "http_authproxy_verify_cert"
-	HTTPAuthProxyAlwaysOnboard       = "http_authproxy_always_onboard"
+	HTTPAuthProxySkipSearch          = "http_authproxy_skip_search"
 	OIDCName                         = "oidc_name"
 	OIDCEndpoint                     = "oidc_endpoint"
 	OIDCCLientID                     = "oidc_client_id"
@@ -120,8 +123,9 @@ const (
 	NotaryURL             = "notary_url"
 	DefaultCoreEndpoint   = "http://core:8080"
 	DefaultNotaryEndpoint = "http://notary-server:4443"
-	LdapGroupType = 1
-	LdapGroupAdminDn = "ldap_group_admin_dn"
+	LDAPGroupType = 1
+	HTTPGroupType = 2
+	LDAPGroupAdminDn = "ldap_group_admin_dn"
 	LDAPGroupMembershipAttribute = "ldap_group_membership_attribute"
 	DefaultRegistryControllerEndpoint = "http://registryctl:8080"
 	WithChartMuseum = "with_chartmuseum"
@@ -141,4 +145,12 @@ const (
 	OIDCLoginPath = "/c/oidc/login"
 
 	ChartUploadCtxKey = contextKey("chart_upload_event")
+
+	// Global notification enable configuration
+	NotificationEnable = "notification_enable"
+
+	// Quota setting items for project
+	QuotaPerProjectEnable = "quota_per_project_enable"
+	CountPerProject       = "count_per_project"
+	StoragePerProject     = "storage_per_project"
 )
142
src/common/dao/artifact.go
Normal file
@@ -0,0 +1,142 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+	"strings"
+	"time"
+
+	"github.com/astaxie/beego/orm"
+	"github.com/goharbor/harbor/src/common/models"
+)
+
+// AddArtifact ...
+func AddArtifact(af *models.Artifact) (int64, error) {
+	now := time.Now()
+	af.CreationTime = now
+	af.PushTime = now
+
+	id, err := GetOrmer().Insert(af)
+	if err != nil {
+		if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+			return 0, ErrDupRows
+		}
+		return 0, err
+	}
+	return id, nil
+}
+
+// UpdateArtifact ...
+func UpdateArtifact(af *models.Artifact) error {
+	_, err := GetOrmer().Update(af)
+	return err
+}
+
+// UpdateArtifactDigest ...
+func UpdateArtifactDigest(af *models.Artifact) error {
+	_, err := GetOrmer().Update(af, "digest")
+	return err
+}
+
+// UpdateArtifactPullTime updates the pull time of the artifact.
+func UpdateArtifactPullTime(af *models.Artifact) error {
+	_, err := GetOrmer().Update(af, "pull_time")
+	return err
+}
+
+// DeleteArtifact ...
+func DeleteArtifact(id int64) error {
+	_, err := GetOrmer().QueryTable(&models.Artifact{}).Filter("ID", id).Delete()
+	return err
+}
+
+// DeleteArtifactByDigest ...
+func DeleteArtifactByDigest(projectID int64, repo, digest string) error {
+	_, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and digest = ? `,
+		projectID, repo, digest).Exec()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DeleteArtifactByTag ...
+func DeleteArtifactByTag(projectID int64, repo, tag string) error {
+	_, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and tag = ? `,
+		projectID, repo, tag).Exec()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListArtifacts list artifacts according to the query conditions
+func ListArtifacts(query *models.ArtifactQuery) ([]*models.Artifact, error) {
+	qs := getArtifactQuerySetter(query)
+	if query.Size > 0 {
+		qs = qs.Limit(query.Size)
+		if query.Page > 0 {
+			qs = qs.Offset((query.Page - 1) * query.Size)
+		}
+	}
+	afs := []*models.Artifact{}
+	_, err := qs.All(&afs)
+	return afs, err
+}
+
+// GetArtifact by repository and tag
+func GetArtifact(repo, tag string) (*models.Artifact, error) {
+	artifact := &models.Artifact{}
+	err := GetOrmer().QueryTable(&models.Artifact{}).
+		Filter("Repo", repo).
+		Filter("Tag", tag).One(artifact)
+	if err != nil {
+		if err == orm.ErrNoRows {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return artifact, nil
+}
+
+// GetTotalOfArtifacts returns total of artifacts
+func GetTotalOfArtifacts(query ...*models.ArtifactQuery) (int64, error) {
+	var qs orm.QuerySeter
+	if len(query) > 0 {
+		qs = getArtifactQuerySetter(query[0])
+	} else {
+		qs = GetOrmer().QueryTable(&models.Artifact{})
+	}
+
+	return qs.Count()
+}
+
+func getArtifactQuerySetter(query *models.ArtifactQuery) orm.QuerySeter {
+	qs := GetOrmer().QueryTable(&models.Artifact{})
+	if query.PID != 0 {
+		qs = qs.Filter("PID", query.PID)
+	}
+	if len(query.Repo) > 0 {
+		qs = qs.Filter("Repo", query.Repo)
+	}
+	if len(query.Tag) > 0 {
+		qs = qs.Filter("Tag", query.Tag)
+	}
+	if len(query.Digest) > 0 {
+		qs = qs.Filter("Digest", query.Digest)
+	}
+	return qs
+}
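The new DAO exposes a query-object API: filtering is driven by the fields of ArtifactQuery, and paging by its Size and Page. A hedged usage fragment (assumes the dao and models packages above and a configured ORM; not runnable on its own):

// Example (illustrative only, not part of this commit).
// Listing the second page of "hello-world" artifacts in project 1, 10 per page.
query := &models.ArtifactQuery{
	PID:  1,
	Repo: "hello-world",
	Size: 10,
	Page: 2,
}
afs, err := dao.ListArtifacts(query)
if err != nil {
	log.Fatal(err)
}
for _, af := range afs {
	fmt.Println(af.Tag, af.Digest)
}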
110
src/common/dao/artifact_blob.go
Normal file
@@ -0,0 +1,110 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+	"fmt"
+	"github.com/astaxie/beego/orm"
+	"github.com/goharbor/harbor/src/common/models"
+	"github.com/goharbor/harbor/src/common/utils/log"
+	"github.com/pkg/errors"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// AddArtifactNBlob ...
+func AddArtifactNBlob(afnb *models.ArtifactAndBlob) (int64, error) {
+	now := time.Now()
+	afnb.CreationTime = now
+	id, err := GetOrmer().Insert(afnb)
+	if err != nil {
+		if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+			return 0, ErrDupRows
+		}
+		return 0, err
+	}
+	return id, nil
+}
+
+// AddArtifactNBlobs ...
+func AddArtifactNBlobs(afnbs []*models.ArtifactAndBlob) error {
+	o := orm.NewOrm()
+	err := o.Begin()
+	if err != nil {
+		return err
+	}
+
+	var errInsertMultiple error
+	total := len(afnbs)
+	successNums, err := o.InsertMulti(total, afnbs)
+	if err != nil {
+		errInsertMultiple = err
+		if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+			errInsertMultiple = errors.Wrap(errInsertMultiple, ErrDupRows.Error())
+		}
+		err := o.Rollback()
+		if err != nil {
+			log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err)
+			errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error())
+		}
+		return errInsertMultiple
+	}
+
+	// part of them cannot be inserted successfully.
+	if successNums != int64(total) {
+		errInsertMultiple = errors.New("Not all of artifact and blobs are inserted successfully")
+		err := o.Rollback()
+		if err != nil {
+			log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err)
+			errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error())
+		}
+		return errInsertMultiple
+	}
+
+	err = o.Commit()
+	if err != nil {
+		log.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
+		return fmt.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
+	}
+
+	return nil
+}
+
+// DeleteArtifactAndBlobByDigest ...
+func DeleteArtifactAndBlobByDigest(digest string) error {
+	_, err := GetOrmer().Raw(`delete from artifact_blob where digest_af = ? `, digest).Exec()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// CountSizeOfArtifact ...
+func CountSizeOfArtifact(digest string) (int64, error) {
+	var res []orm.Params
+	num, err := GetOrmer().Raw(`SELECT sum(bb.size) FROM artifact_blob afnb LEFT JOIN blob bb ON afnb.digest_blob = bb.digest WHERE afnb.digest_af = ? `, digest).Values(&res)
+	if err != nil {
+		return -1, err
+	}
+	if num > 0 {
+		size, err := strconv.ParseInt(res[0]["sum"].(string), 0, 64)
+		if err != nil {
+			return -1, err
+		}
+		return size, nil
+	}
+	return -1, err
+}
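AddArtifactNBlobs is all-or-nothing: it opens a transaction, bulk-inserts with InsertMulti, and rolls back on any failure or partial insert. A hedged usage fragment (assumes the dao and models packages above and a configured ORM; digests are placeholders):

// Example (illustrative only, not part of this commit).
// Either all three artifact-to-blob mappings land, or none do.
afnbs := []*models.ArtifactAndBlob{
	{DigestAF: "sha256:aaa", DigestBlob: "sha256:b01"},
	{DigestAF: "sha256:aaa", DigestBlob: "sha256:b02"},
	{DigestAF: "sha256:aaa", DigestBlob: "sha256:b03"},
}
if err := dao.AddArtifactNBlobs(afnbs); err != nil {
	log.Errorf("insert rolled back: %v", err)
}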
131
src/common/dao/artifact_blob_test.go
Normal file
@@ -0,0 +1,131 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+	"testing"
+
+	"github.com/goharbor/harbor/src/common/models"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAddArtifactNBlob(t *testing.T) {
+	afnb := &models.ArtifactAndBlob{
+		DigestAF:   "vvvv",
+		DigestBlob: "aaaa",
+	}
+
+	// add
+	id, err := AddArtifactNBlob(afnb)
+	require.Nil(t, err)
+	afnb.ID = id
+	assert.Equal(t, id, int64(1))
+}
+
+func TestAddArtifactNBlobs(t *testing.T) {
+	afnb1 := &models.ArtifactAndBlob{
+		DigestAF:   "zzzz",
+		DigestBlob: "zzza",
+	}
+	afnb2 := &models.ArtifactAndBlob{
+		DigestAF:   "zzzz",
+		DigestBlob: "zzzb",
+	}
+	afnb3 := &models.ArtifactAndBlob{
+		DigestAF:   "zzzz",
+		DigestBlob: "zzzc",
+	}
+
+	var afnbs []*models.ArtifactAndBlob
+	afnbs = append(afnbs, afnb1)
+	afnbs = append(afnbs, afnb2)
+	afnbs = append(afnbs, afnb3)
+
+	// add
+	err := AddArtifactNBlobs(afnbs)
+	require.Nil(t, err)
+}
+
+func TestDeleteArtifactAndBlobByDigest(t *testing.T) {
+	afnb := &models.ArtifactAndBlob{
+		DigestAF:   "vvvv",
+		DigestBlob: "vvva",
+	}
+
+	// add
+	_, err := AddArtifactNBlob(afnb)
+	require.Nil(t, err)
+
+	// delete
+	err = DeleteArtifactAndBlobByDigest(afnb.DigestAF)
+	require.Nil(t, err)
+}
+
+func TestCountSizeOfArtifact(t *testing.T) {
+	afnb1 := &models.ArtifactAndBlob{
+		DigestAF:   "xxxx",
+		DigestBlob: "aaaa",
+	}
+	afnb2 := &models.ArtifactAndBlob{
+		DigestAF:   "xxxx",
+		DigestBlob: "aaab",
+	}
+	afnb3 := &models.ArtifactAndBlob{
+		DigestAF:   "xxxx",
+		DigestBlob: "aaac",
+	}
+
+	var afnbs []*models.ArtifactAndBlob
+	afnbs = append(afnbs, afnb1)
+	afnbs = append(afnbs, afnb2)
+	afnbs = append(afnbs, afnb3)
+
+	err := AddArtifactNBlobs(afnbs)
+	require.Nil(t, err)
+
+	blob1 := &models.Blob{
+		Digest:      "aaaa",
+		ContentType: "v2.blob",
+		Size:        100,
+	}
+
+	_, err = AddBlob(blob1)
+	require.Nil(t, err)
+
+	blob2 := &models.Blob{
+		Digest:      "aaab",
+		ContentType: "v2.blob",
+		Size:        200,
+	}
+
+	_, err = AddBlob(blob2)
+	require.Nil(t, err)
+
+	blob3 := &models.Blob{
+		Digest:      "aaac",
+		ContentType: "v2.blob",
+		Size:        300,
+	}
+
+	_, err = AddBlob(blob3)
+	require.Nil(t, err)
+
+	imageSize, err := CountSizeOfArtifact("xxxx")
+	require.Nil(t, err)
+	require.Equal(t, imageSize, int64(600))
+}
184
src/common/dao/artifact_test.go
Normal file
@@ -0,0 +1,184 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+	"testing"
+	"time"
+
+	"github.com/goharbor/harbor/src/common/models"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAddArtifact(t *testing.T) {
+	af := &models.Artifact{
+		PID:    1,
+		Repo:   "hello-world",
+		Tag:    "latest",
+		Digest: "1234abcd",
+		Kind:   "image",
+	}
+
+	// add
+	id, err := AddArtifact(af)
+	require.Nil(t, err)
+	af.ID = id
+	assert.Equal(t, id, int64(1))
+}
+
+func TestGetArtifact(t *testing.T) {
+	repo := "hello-world"
+	tag := "latest"
+	artifact, err := GetArtifact(repo, tag)
+	require.Nil(t, err)
+	require.NotNil(t, artifact)
+	assert.Equal(t, repo, artifact.Repo)
+	assert.Equal(t, tag, artifact.Tag)
+}
+
+func TestUpdateArtifactDigest(t *testing.T) {
+	af := &models.Artifact{
+		PID:    1,
+		Repo:   "hello-world",
+		Tag:    "v2.0",
+		Digest: "4321abcd",
+		Kind:   "image",
+	}
+
+	// add
+	_, err := AddArtifact(af)
+	require.Nil(t, err)
+
+	af.Digest = "update_4321abcd"
+	require.Nil(t, UpdateArtifactDigest(af))
+	assert.Equal(t, af.Digest, "update_4321abcd")
+}
+
+func TestUpdateArtifactPullTime(t *testing.T) {
+	timeNow := time.Now()
+	af := &models.Artifact{
+		PID:      1,
+		Repo:     "TestUpdateArtifactPullTime",
+		Tag:      "v1.0",
+		Digest:   "4321abcd",
+		Kind:     "image",
+		PullTime: timeNow,
+	}
+
+	// add
+	_, err := AddArtifact(af)
+	require.Nil(t, err)
+
+	time.Sleep(time.Second * 1)
+
+	af.PullTime = time.Now()
+	require.Nil(t, UpdateArtifactPullTime(af))
+	assert.NotEqual(t, timeNow, af.PullTime)
+}
+
+func TestDeleteArtifact(t *testing.T) {
+	af := &models.Artifact{
+		PID:    1,
+		Repo:   "hello-world",
+		Tag:    "v1.0",
+		Digest: "1234abcd",
+		Kind:   "image",
+	}
+	// add
+	id, err := AddArtifact(af)
+	require.Nil(t, err)
+
+	// delete
+	err = DeleteArtifact(id)
+	require.Nil(t, err)
+}
+
+func TestDeleteArtifactByDigest(t *testing.T) {
+	af := &models.Artifact{
+		PID:    1,
+		Repo:   "hello-world",
+		Tag:    "v1.1",
+		Digest: "TestDeleteArtifactByDigest",
+		Kind:   "image",
+	}
+	// add
+	_, err := AddArtifact(af)
+	require.Nil(t, err)
+
+	// delete
+	err = DeleteArtifactByDigest(af.PID, af.Repo, af.Digest)
+	require.Nil(t, err)
+}
+
+func TestDeleteArtifactByTag(t *testing.T) {
+	af := &models.Artifact{
+		PID:    1,
+		Repo:   "hello-world",
+		Tag:    "v1.2",
+		Digest: "TestDeleteArtifactByTag",
+		Kind:   "image",
+	}
+	// add
+	_, err := AddArtifact(af)
+	require.Nil(t, err)
+
+	// delete
+	err = DeleteArtifactByTag(1, "hello-world", "v1.2")
+	require.Nil(t, err)
+}
+
+func TestListArtifacts(t *testing.T) {
+	af := &models.Artifact{
+		PID:    1,
+		Repo:   "hello-world",
+		Tag:    "v3.0",
+		Digest: "TestListArtifacts",
+		Kind:   "image",
+	}
+	// add
+	_, err := AddArtifact(af)
+	require.Nil(t, err)
+
+	afs, err := ListArtifacts(&models.ArtifactQuery{
+		PID:  1,
+		Repo: "hello-world",
+		Tag:  "v3.0",
+	})
+	require.Nil(t, err)
+	assert.Equal(t, 1, len(afs))
+}
+
+func TestGetTotalOfArtifacts(t *testing.T) {
+	af := &models.Artifact{
+		PID:    2,
+		Repo:   "hello-world",
+		Tag:    "v3.0",
+		Digest: "TestGetTotalOfArtifacts",
+		Kind:   "image",
+	}
+	// add
+	_, err := AddArtifact(af)
+	require.Nil(t, err)
+
+	total, err := GetTotalOfArtifacts(&models.ArtifactQuery{
+		PID:  2,
+		Repo: "hello-world",
+		Tag:  "v3.0",
+	})
+	require.Nil(t, err)
+	assert.Equal(t, int64(1), total)
+}
@ -121,12 +121,16 @@ func getDatabase(database *models.Database) (db Database, err error) {
    switch database.Type {
    case "", "postgresql":
        db = NewPGSQL(database.PostGreSQL.Host,
        db = NewPGSQL(
            database.PostGreSQL.Host,
            strconv.Itoa(database.PostGreSQL.Port),
            database.PostGreSQL.Username,
            database.PostGreSQL.Password,
            database.PostGreSQL.Database,
            database.PostGreSQL.SSLMode)
            database.PostGreSQL.SSLMode,
            database.PostGreSQL.MaxIdleConns,
            database.PostGreSQL.MaxOpenConns,
        )
    default:
        err = fmt.Errorf("invalid database: %s", database.Type)
    }
@ -139,6 +143,8 @@ var once sync.Once
// GetOrmer :set ormer singleton
func GetOrmer() orm.Ormer {
    once.Do(func() {
        // override the default value(1000) to return all records when setting no limit
        orm.DefaultRowsLimit = -1
        globalOrm = orm.NewOrm()
    })
    return globalOrm
@ -167,11 +173,13 @@ func ClearTable(table string) error {
    return err
}

func paginateForRawSQL(sql string, limit, offset int64) string {
// PaginateForRawSQL ...
func PaginateForRawSQL(sql string, limit, offset int64) string {
    return fmt.Sprintf("%s limit %d offset %d", sql, limit, offset)
}

func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter {
// PaginateForQuerySetter ...
func PaginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter {
    if size > 0 {
        qs = qs.Limit(size)
        if page > 0 {
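A side note on the newly exported pagination helper: PaginateForRawSQL only appends a LIMIT/OFFSET clause to whatever SQL it is given. A minimal, self-contained sketch of the expected behavior (the query string below is illustrative, not from this patch):

package main

import "fmt"

// paginateForRawSQL mirrors the helper above: it appends limit/offset
// to a raw SQL string and leaves the rest of the query untouched.
func paginateForRawSQL(sql string, limit, offset int64) string {
    return fmt.Sprintf("%s limit %d offset %d", sql, limit, offset)
}

func main() {
    // Page 3 with a page size of 20 becomes "limit 20 offset 40".
    base := "select id from user_group where group_type = 1" // illustrative query
    fmt.Println(paginateForRawSQL(base, 20, 40))
}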
@ -183,7 +191,34 @@ func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter

// Escape ..
func Escape(str string) string {
    str = strings.Replace(str, `\`, `\\`, -1)
    str = strings.Replace(str, `%`, `\%`, -1)
    str = strings.Replace(str, `_`, `\_`, -1)
    return str
}

// WithTransaction helper for transaction
func WithTransaction(handler func(o orm.Ormer) error) error {
    o := orm.NewOrm()

    if err := o.Begin(); err != nil {
        log.Errorf("begin transaction failed: %v", err)
        return err
    }

    if err := handler(o); err != nil {
        if e := o.Rollback(); e != nil {
            log.Errorf("rollback transaction failed: %v", e)
            return e
        }

        return err
    }

    if err := o.Commit(); err != nil {
        log.Errorf("commit transaction failed: %v", err)
        return err
    }

    return nil
}
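WithTransaction wraps a handler in one Begin/Commit pair and rolls back as soon as the handler returns an error, so multi-statement writes either all land or none do. A hedged usage sketch (the SQL statements and IDs are placeholders, not part of this change):

package main

import (
    "log"

    "github.com/astaxie/beego/orm"
    "github.com/goharbor/harbor/src/common/dao"
)

func main() {
    // Both statements commit together; returning an error from the
    // handler takes the rollback branch of WithTransaction instead.
    err := dao.WithTransaction(func(o orm.Ormer) error {
        // Illustrative statements only: any failure aborts the whole unit.
        if _, err := o.Raw("update project set owner_id = ? where project_id = ?", 1, 2).Exec(); err != nil {
            return err
        }
        if _, err := o.Raw("delete from project_member where project_id = ?", 2).Exec(); err != nil {
            return err
        }
        return nil
    })
    if err != nil {
        log.Printf("transaction rolled back: %v", err)
    }
}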
136
src/common/dao/blob.go
Normal file
@ -0,0 +1,136 @@
package dao

import (
    "fmt"
    "strings"
    "time"

    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/utils/log"
)

// AddBlob ...
func AddBlob(blob *models.Blob) (int64, error) {
    now := time.Now()
    blob.CreationTime = now
    id, err := GetOrmer().Insert(blob)
    if err != nil {
        if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
            return 0, ErrDupRows
        }
        return 0, err
    }
    return id, nil
}

// GetOrCreateBlob returns blob by digest, create it if not exists
func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) {
    blob.CreationTime = time.Now()

    created, id, err := GetOrmer().ReadOrCreate(blob, "digest")
    if err != nil {
        return false, nil, err
    }

    blob.ID = id

    return created, blob, nil
}

// GetBlob ...
func GetBlob(digest string) (*models.Blob, error) {
    o := GetOrmer()
    qs := o.QueryTable(&models.Blob{})
    qs = qs.Filter("Digest", digest)
    b := []*models.Blob{}
    _, err := qs.All(&b)
    if err != nil {
        return nil, fmt.Errorf("failed to get blob for digest %s, error: %v", digest, err)
    }
    if len(b) == 0 {
        log.Infof("No blob found for digest %s, returning empty.", digest)
        return &models.Blob{}, nil
    } else if len(b) > 1 {
        log.Infof("Multiple blob found for digest %s", digest)
        return &models.Blob{}, fmt.Errorf("Multiple blob found for digest %s", digest)
    }
    return b[0], nil
}

// DeleteBlob ...
func DeleteBlob(digest string) error {
    o := GetOrmer()
    _, err := o.QueryTable("blob").Filter("digest", digest).Delete()
    return err
}

// GetBlobsByArtifact returns blobs of artifact
func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) {
    sql := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)`

    var blobs []*models.Blob
    if _, err := GetOrmer().Raw(sql, artifactDigest).QueryRows(&blobs); err != nil {
        return nil, err
    }

    return blobs, nil
}

// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project
func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) {
    blobs, err := GetBlobsByArtifact(digest)
    if err != nil {
        return nil, err
    }

    sql := fmt.Sprintf(`
SELECT
    DISTINCT b.digest_blob AS digest
FROM
    (
        SELECT
            digest
        FROM
            artifact
        WHERE
            (
                project_id = ?
                AND repo != ?
            )
            OR (
                project_id = ?
                AND digest != ?
            )
    ) AS a
    LEFT JOIN artifact_blob b ON a.digest = b.digest_af
    AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)-1))

    params := []interface{}{projectID, repository, projectID, digest}
    for _, blob := range blobs {
        if blob.Digest != digest {
            params = append(params, blob.Digest)
        }
    }

    var rows []struct {
        Digest string
    }

    if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil {
        return nil, err
    }

    shared := map[string]bool{}
    for _, row := range rows {
        shared[row.Digest] = true
    }

    var exclusive []*models.Blob
    for _, blob := range blobs {
        if blob.Digest != digest && !shared[blob.Digest] {
            exclusive = append(exclusive, blob)
        }
    }

    return exclusive, nil
}
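GetExclusiveBlobs exists so that deletion and quota logic only reclaims layers no other artifact in the same project still references. A rough sketch of the intended call pattern (the project ID, repository, and digest below are illustrative):

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/common/dao"
)

func main() {
    // Suppose the manifest below is being deleted from project 1.
    manifest := "sha256:0000000000000000000000000000000000000000000000000000000000000000" // illustrative
    blobs, err := dao.GetExclusiveBlobs(1, "library/mysql", manifest)
    if err != nil {
        panic(err)
    }
    // Only these layers are exclusive to the artifact and safe to reclaim;
    // layers shared with other repos/tags were filtered out by the query.
    for _, b := range blobs {
        fmt.Println("reclaimable:", b.Digest, b.Size)
    }
}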
222
src/common/dao/blob_test.go
Normal file
@ -0,0 +1,222 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dao

import (
    "strings"
    "testing"

    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/utils"
    "github.com/opencontainers/go-digest"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
)

func TestAddBlob(t *testing.T) {
    blob := &models.Blob{
        Digest:      "1234abcd",
        ContentType: "v2.blob",
        Size:        1523,
    }

    // add
    _, err := AddBlob(blob)
    require.Nil(t, err)
}

func TestGetBlob(t *testing.T) {
    blob := &models.Blob{
        Digest:      "12345abcde",
        ContentType: "v2.blob",
        Size:        453,
    }

    // add
    id, err := AddBlob(blob)
    require.Nil(t, err)
    blob.ID = id

    blob2, err := GetBlob("12345abcde")
    require.Nil(t, err)
    assert.Equal(t, blob.Digest, blob2.Digest)
}

func TestDeleteBlob(t *testing.T) {
    blob := &models.Blob{
        Digest:      "123456abcdef",
        ContentType: "v2.blob",
        Size:        4543,
    }
    id, err := AddBlob(blob)
    require.Nil(t, err)
    blob.ID = id
    err = DeleteBlob(blob.Digest)
    require.Nil(t, err)
}

func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) {
    digest := digest.FromString(strings.Join(layerDigests, ":")).String()
    artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag}
    if _, err := AddArtifact(artifact); err != nil {
        return "", err
    }

    var afnbs []*models.ArtifactAndBlob

    blobDigests := append([]string{digest}, layerDigests...)
    for _, blobDigest := range blobDigests {
        blob := &models.Blob{Digest: blobDigest, Size: 1}
        if _, _, err := GetOrCreateBlob(blob); err != nil {
            return "", err
        }

        afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest})
    }

    total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest})
    if err != nil {
        return "", err
    }

    if total == 1 {
        if err := AddArtifactNBlobs(afnbs); err != nil {
            return "", err
        }
    }

    return digest, nil
}

func withProject(f func(int64, string)) {
    projectName := utils.GenerateRandomString()

    projectID, err := AddProject(models.Project{
        Name:    projectName,
        OwnerID: 1,
    })
    if err != nil {
        panic(err)
    }

    defer func() {
        DeleteProject(projectID)
    }()

    f(projectID, projectName)
}

type GetExclusiveBlobsSuite struct {
    suite.Suite
}

func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string {
    digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...)
    suite.Nil(err)

    return digest
}

func (suite *GetExclusiveBlobsSuite) TestInSameRepository() {
    withProject(func(projectID int64, projectName string) {
        digest1 := digest.FromString(utils.GenerateRandomString()).String()
        digest2 := digest.FromString(utils.GenerateRandomString()).String()
        digest3 := digest.FromString(utils.GenerateRandomString()).String()

        manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
            suite.Len(blobs, 2)
        }

        manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2)
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
            suite.Len(blobs, 2)
        }

        manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
            suite.Len(blobs, 0)
        }
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
            suite.Len(blobs, 0)
        }
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
            suite.Len(blobs, 1)
            suite.Equal(digest3, blobs[0].Digest)
        }
    })
}

func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
    withProject(func(projectID int64, projectName string) {
        digest1 := digest.FromString(utils.GenerateRandomString()).String()
        digest2 := digest.FromString(utils.GenerateRandomString()).String()
        digest3 := digest.FromString(utils.GenerateRandomString()).String()

        manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
            suite.Len(blobs, 2)
        }

        manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", "latest", digest1, digest2)
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
            suite.Len(blobs, 0)
        }
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) {
            suite.Len(blobs, 0)
        }

        manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
            suite.Len(blobs, 0)
        }
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
            suite.Len(blobs, 0)
        }
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
            suite.Len(blobs, 1)
            suite.Equal(digest3, blobs[0].Digest)
        }
    })
}

func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() {
    withProject(func(projectID int64, projectName string) {
        digest1 := digest.FromString(utils.GenerateRandomString()).String()
        digest2 := digest.FromString(utils.GenerateRandomString()).String()

        manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
        if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
            suite.Len(blobs, 2)
        }

        withProject(func(id int64, name string) {
            manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2)
            if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
                suite.Len(blobs, 2)
            }
            if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) {
                suite.Len(blobs, 2)
            }
        })

    })
}

func TestRunGetExclusiveBlobsSuite(t *testing.T) {
    suite.Run(t, new(GetExclusiveBlobsSuite))
}
@ -54,7 +54,7 @@ func GetConfigEntries() ([]*models.ConfigEntry, error) {
func SaveConfigEntries(entries []models.ConfigEntry) error {
    o := GetOrmer()
    for _, entry := range entries {
        if entry.Key == common.LdapGroupAdminDn {
        if entry.Key == common.LDAPGroupAdminDn {
            entry.Value = utils.TrimLower(entry.Value)
        }
        tempEntry := models.ConfigEntry{}
64
src/common/dao/cve_whitelist.go
Normal file
@ -0,0 +1,64 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dao

import (
    "encoding/json"
    "fmt"
    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/utils/log"
)

// CreateCVEWhitelist creates the CVE whitelist
func CreateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
    o := GetOrmer()
    itemsBytes, _ := json.Marshal(l.Items)
    l.ItemsText = string(itemsBytes)
    return o.Insert(&l)
}

// UpdateCVEWhitelist Updates the vulnerability white list to DB
func UpdateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
    o := GetOrmer()
    itemsBytes, _ := json.Marshal(l.Items)
    l.ItemsText = string(itemsBytes)
    id, err := o.InsertOrUpdate(&l, "project_id")
    return id, err
}

// GetCVEWhitelist Gets the CVE whitelist of the project based on the project ID in parameter
func GetCVEWhitelist(pid int64) (*models.CVEWhitelist, error) {
    o := GetOrmer()
    qs := o.QueryTable(&models.CVEWhitelist{})
    qs = qs.Filter("ProjectID", pid)
    r := []*models.CVEWhitelist{}
    _, err := qs.All(&r)
    if err != nil {
        return nil, fmt.Errorf("failed to get CVE whitelist for project %d, error: %v", pid, err)
    }
    if len(r) == 0 {
        return nil, nil
    } else if len(r) > 1 {
        log.Infof("Multiple CVE whitelists found for project %d, length: %d, returning first element.", pid, len(r))
    }
    items := []models.CVEWhitelistItem{}
    err = json.Unmarshal([]byte(r[0].ItemsText), &items)
    if err != nil {
        log.Errorf("Failed to decode item list, err: %v, text: %s", err, r[0].ItemsText)
        return nil, err
    }
    r[0].Items = items
    return r[0], nil
}
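Worth noting for reviewers: the whitelist items live twice on the model, as the Items slice in memory and as JSON in ItemsText for storage, and UpdateCVEWhitelist upserts on project_id. A small sketch of the round trip (the project ID and CVE IDs are illustrative):

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/common/dao"
    "github.com/goharbor/harbor/src/common/models"
)

func main() {
    // Upsert: a second call for the same project replaces the row
    // instead of inserting a new one (InsertOrUpdate on project_id).
    wl := models.CVEWhitelist{
        ProjectID: 3, // illustrative project
        Items:     []models.CVEWhitelistItem{{CVEID: "CVE-2019-10164"}},
    }
    if _, err := dao.UpdateCVEWhitelist(wl); err != nil {
        panic(err)
    }

    // GetCVEWhitelist returns (nil, nil) when the project has no row,
    // so callers must check for a nil list as well as an error.
    got, err := dao.GetCVEWhitelist(3)
    if err != nil {
        panic(err)
    }
    if got != nil {
        fmt.Println("items:", got.Items)
    }
}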
55
src/common/dao/cve_whitelist_test.go
Normal file
@ -0,0 +1,55 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dao

import (
    "github.com/goharbor/harbor/src/common/models"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "testing"
)

func TestUpdateAndGetCVEWhitelist(t *testing.T) {
    require.Nil(t, ClearTable("cve_whitelist"))
    l2, err := GetCVEWhitelist(5)
    assert.Nil(t, err)
    assert.Nil(t, l2)

    longList := []models.CVEWhitelistItem{}
    for i := 0; i < 50; i++ {
        longList = append(longList, models.CVEWhitelistItem{CVEID: "CVE-1999-0067"})
    }

    e := int64(1573254000)
    in1 := models.CVEWhitelist{ProjectID: 3, Items: longList, ExpiresAt: &e}
    _, err = UpdateCVEWhitelist(in1)
    require.Nil(t, err)
    // assert.Equal(t, int64(1), n)
    out1, err := GetCVEWhitelist(3)
    require.Nil(t, err)
    assert.Equal(t, int64(3), out1.ProjectID)
    assert.Equal(t, longList, out1.Items)
    assert.Equal(t, e, *out1.ExpiresAt)

    sysCVEs := []models.CVEWhitelistItem{
        {CVEID: "CVE-2019-10164"},
        {CVEID: "CVE-2017-12345"},
    }
    in3 := models.CVEWhitelist{Items: sysCVEs}
    _, err = UpdateCVEWhitelist(in3)
    require.Nil(t, err)

    require.Nil(t, ClearTable("cve_whitelist"))
}
@ -47,8 +47,8 @@ func cleanByUser(username string) {
    o := GetOrmer()
    o.Begin()

    err = execUpdate(o, `delete
    from project_member
    where entity_id = (
        select user_id
        from harbor_user
@ -59,7 +59,7 @@ func cleanByUser(username string) {
        log.Error(err)
    }

    err = execUpdate(o, `delete
    from project_member
    where project_id = (
        select project_id
@ -71,8 +71,8 @@ func cleanByUser(username string) {
        log.Error(err)
    }

    err = execUpdate(o, `delete
    from access_log
    where username = ?
    `, username)
    if err != nil {
|
|||||||
log.Error(err)
|
log.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = execUpdate(o, `delete
|
err = execUpdate(o, `delete
|
||||||
from access_log
|
from access_log
|
||||||
where project_id = (
|
where project_id = (
|
||||||
select project_id
|
select project_id
|
||||||
@ -302,9 +302,6 @@ func TestListUsers(t *testing.T) {
    if err != nil {
        t.Errorf("Error occurred in ListUsers: %v", err)
    }
    if len(users) != 1 {
        t.Errorf("Expect one user in list, but the actual length is %d, the list: %+v", len(users), users)
    }
    users2, err := ListUsers(&models.UserQuery{Username: username})
    if len(users2) != 1 {
        t.Errorf("Expect one user in list, but the actual length is %d, the list: %+v", len(users), users)
@ -1035,3 +1032,53 @@ func TestIsDupRecError(t *testing.T) {
    assert.True(t, isDupRecErr(fmt.Errorf("pq: duplicate key value violates unique constraint \"properties_k_key\"")))
    assert.False(t, isDupRecErr(fmt.Errorf("other error")))
}

func TestWithTransaction(t *testing.T) {
    reference := "transaction"

    quota := models.Quota{
        Reference:   reference,
        ReferenceID: "1",
        Hard:        "{}",
    }

    failed := func(o orm.Ormer) error {
        o.Insert(&quota)

        return fmt.Errorf("failed")
    }

    var quotaID int64
    success := func(o orm.Ormer) error {
        id, err := o.Insert(&quota)
        if err != nil {
            return err
        }

        quotaID = id
        return nil
    }

    assert := assert.New(t)

    if assert.Error(WithTransaction(failed)) {
        var quota models.Quota
        quota.Reference = reference
        quota.ReferenceID = "1"
        err := GetOrmer().Read(&quota, "reference", "reference_id")
        assert.Error(err)
        assert.False(quota.ID != 0)
    }

    if assert.Nil(WithTransaction(success)) {
        var quota models.Quota
        quota.Reference = reference
        quota.ReferenceID = "1"
        err := GetOrmer().Read(&quota, "reference", "reference_id")
        assert.Nil(err)
        assert.True(quota.ID != 0)
        assert.Equal(quotaID, quota.ID)

        GetOrmer().Delete(&models.Quota{ID: quotaID}, "id")
    }
}
@ -18,23 +18,35 @@ import (
    "strings"
    "time"

    "github.com/goharbor/harbor/src/common"
    "github.com/goharbor/harbor/src/common/utils"

    "fmt"

    "github.com/goharbor/harbor/src/common"
    "github.com/goharbor/harbor/src/common/dao"
    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/utils/log"
    "github.com/pkg/errors"
)

// ErrGroupNameDup ...
var ErrGroupNameDup = errors.New("duplicated user group name")

// AddUserGroup - Add User Group
func AddUserGroup(userGroup models.UserGroup) (int, error) {
    userGroupList, err := QueryUserGroup(models.UserGroup{GroupName: userGroup.GroupName, GroupType: common.HTTPGroupType})
    if err != nil {
        return 0, ErrGroupNameDup
    }
    if len(userGroupList) > 0 {
        return 0, ErrGroupNameDup
    }
    o := dao.GetOrmer()

    sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?) RETURNING id"
    var id int
    now := time.Now()

    err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id)
    err = o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id)
    if err != nil {
        return 0, err
    }
@ -47,10 +59,10 @@ func QueryUserGroup(query models.UserGroup) ([]*models.UserGroup, error) {
    o := dao.GetOrmer()
    sql := `select id, group_name, group_type, ldap_group_dn from user_group where 1=1 `
    sqlParam := make([]interface{}, 1)
    groups := []*models.UserGroup{}
    var groups []*models.UserGroup
    if len(query.GroupName) != 0 {
        sql += ` and group_name like ? `
        sql += ` and group_name = ? `
        sqlParam = append(sqlParam, `%`+dao.Escape(query.GroupName)+`%`)
        sqlParam = append(sqlParam, query.GroupName)
    }

    if query.GroupType != 0 {
@ -86,6 +98,27 @@ func GetUserGroup(id int) (*models.UserGroup, error) {
    return nil, nil
}

// GetGroupIDByGroupName - Return the group IDs for the given group names. The result may contain fewer IDs than names if some groups don't exist.
func GetGroupIDByGroupName(groupName []string, groupType int) ([]int, error) {
    var retGroupID []int
    var conditions []string
    if len(groupName) == 0 {
        return retGroupID, nil
    }
    for _, gName := range groupName {
        con := "'" + gName + "'"
        conditions = append(conditions, con)
    }
    sql := fmt.Sprintf("select id from user_group where group_name in ( %s ) and group_type = %v", strings.Join(conditions, ","), groupType)
    o := dao.GetOrmer()
    cnt, err := o.Raw(sql).QueryRows(&retGroupID)
    if err != nil {
        return retGroupID, err
    }
    log.Debugf("Found rows %v", cnt)
    return retGroupID, nil
}

// DeleteUserGroup ...
func DeleteUserGroup(id int) error {
    userGroup := models.UserGroup{ID: id}
@ -111,11 +144,7 @@ func UpdateUserGroupName(id int, groupName string) error {
    return err
}

// OnBoardUserGroup will check if a usergroup exists in usergroup table, if not insert the usergroup and
// put the id in the pointer of usergroup model, if it does exist, return the usergroup's profile.
// This is used for ldap and uaa authentication, such the usergroup can have an ID in Harbor.
// the keyAttribute and combinedKeyAttribute are key columns used to check duplicate usergroup in harbor
func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error {
func onBoardCommonUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error {
    g.LdapGroupDN = utils.TrimLower(g.LdapGroupDN)

    o := dao.GetOrmer()
@ -140,19 +169,11 @@ func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttri
    return nil
}

// GetGroupDNQueryCondition get the part of IN ('XXX', 'XXX') condition
func GetGroupDNQueryCondition(userGroupList []*models.UserGroup) string {
    result := make([]string, 0)
    count := 0
    for _, userGroup := range userGroupList {
        if userGroup.GroupType == common.LdapGroupType {
            result = append(result, "'"+userGroup.LdapGroupDN+"'")
            count++
        }
    }
    // No LDAP Group found
    if count == 0 {
        return ""
    }
    return strings.Join(result, ",")
}
// OnBoardUserGroup will check if a usergroup exists in usergroup table, if not insert the usergroup and
// put the id in the pointer of usergroup model, if it does exist, return the usergroup's profile.
func OnBoardUserGroup(g *models.UserGroup) error {
    if g.GroupType == common.LDAPGroupType {
        return onBoardCommonUserGroup(g, "LdapGroupDN", "GroupType")
    }
    return onBoardCommonUserGroup(g, "GroupName", "GroupType")
}
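The net effect of these group changes is that callers now resolve group names to IDs up front and hand []int to the project queries, instead of string-building a DN IN (...) condition. A sketch of the new flow (the group names are illustrative; the import path assumes this package lives at src/common/dao/group):

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/common"
    "github.com/goharbor/harbor/src/common/dao/group"
)

func main() {
    // Names that do not exist are simply missing from the result;
    // GetGroupIDByGroupName does not treat that as an error.
    ids, err := group.GetGroupIDByGroupName(
        []string{"test_http_group", "test_myhttp_group"}, // illustrative names
        common.HTTPGroupType,
    )
    if err != nil {
        panic(err)
    }
    fmt.Println("group IDs to pass to dao.GetGroupProjects:", ids)
}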
@ -17,6 +17,7 @@ package group
import (
    "fmt"
    "os"
    "reflect"
    "testing"

    "github.com/goharbor/harbor/src/common"
@ -46,20 +47,30 @@ func TestMain(m *testing.M) {
    // Extract to test utils
    initSqls := []string{
        "insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
        "insert into harbor_user (username, email, password, realname) values ('grouptestu09', 'grouptestu09@example.com', '123456', 'grouptestu09')",
        "insert into project (name, owner_id) values ('member_test_01', 1)",
        `insert into project (name, owner_id) values ('group_project2', 1)`,
        `insert into project (name, owner_id) values ('group_project_private', 1)`,
        "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com')",
        "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_http_group', 2, '')",
        "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_myhttp_group', 2, '')",
        "update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
        "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
        "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
        "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_http_group'), 'g', 4)",
        "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_myhttp_group'), 'g', 4)",
    }

    clearSqls := []string{
        "delete from project where name='member_test_01'",
        "delete from harbor_user where username='member_test_01' or username='pm_sample'",
        "delete from project where name='group_project2'",
        "delete from project where name='group_project_private'",
        "delete from harbor_user where username='member_test_01' or username='pm_sample' or username='grouptestu09'",
        "delete from user_group",
        "delete from project_member",
    }
    dao.PrepareTestData(clearSqls, initSqls)
    dao.ExecuteBatchSQL(initSqls)
    defer dao.ExecuteBatchSQL(clearSqls)

    result = m.Run()

@ -80,7 +91,7 @@ func TestAddUserGroup(t *testing.T) {
        want    int
        wantErr bool
    }{
        {"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LdapGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false},
        {"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LDAPGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false},
        {"Insert other user group", args{userGroup: models.UserGroup{GroupName: "other_group", GroupType: 3, LdapGroupDN: "other information"}}, 0, false},
    }
    for _, tt := range tests {
@ -108,8 +119,8 @@ func TestQueryUserGroup(t *testing.T) {
        wantErr bool
    }{
        {"Query all user group", args{query: models.UserGroup{GroupName: "test_group_01"}}, 1, false},
        {"Query all ldap group", args{query: models.UserGroup{GroupType: common.LdapGroupType}}, 2, false},
        {"Query all ldap group", args{query: models.UserGroup{GroupType: common.LDAPGroupType}}, 2, false},
        {"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LdapGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false},
        {"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
@ -126,7 +137,7 @@ func TestQueryUserGroup(t *testing.T) {
}

func TestGetUserGroup(t *testing.T) {
    userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LdapGroupType, LdapGroupDN: "ldap_dn_string"}
    userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LDAPGroupType, LdapGroupDN: "ldap_dn_string"}
    result, err := AddUserGroup(userGroup)
    if err != nil {
        t.Errorf("Error occurred when AddUserGroup: %v", err)
@ -175,7 +186,7 @@ func TestUpdateUserGroup(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            fmt.Printf("id=%v", createdUserGroupID)
            fmt.Printf("id=%v\n", createdUserGroupID)
            if err := UpdateUserGroupName(tt.args.id, tt.args.groupName); (err != nil) != tt.wantErr {
                t.Errorf("UpdateUserGroup() error = %v, wantErr %v", err, tt.wantErr)
            userGroup, err := GetUserGroup(tt.args.id)
@ -231,65 +242,30 @@ func TestOnBoardUserGroup(t *testing.T) {
            args{g: &models.UserGroup{
                GroupName:   "harbor_example",
                LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com",
                GroupType:   common.LdapGroupType}},
                GroupType:   common.LDAPGroupType}},
            false},
        {"OnBoardUserGroup second time",
            args{g: &models.UserGroup{
                GroupName:   "harbor_example",
                LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com",
                GroupType:   common.LdapGroupType}},
                GroupType:   common.LDAPGroupType}},
            false},
        {"OnBoardUserGroup HTTP user group",
            args{g: &models.UserGroup{
                GroupName: "test_myhttp_group",
                GroupType: common.HTTPGroupType}},
            false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if err := OnBoardUserGroup(tt.args.g, "LdapGroupDN", "GroupType"); (err != nil) != tt.wantErr {
            if err := OnBoardUserGroup(tt.args.g); (err != nil) != tt.wantErr {
                t.Errorf("OnBoardUserGroup() error = %v, wantErr %v", err, tt.wantErr)
            }
        })
    }
}

func TestGetGroupDNQueryCondition(t *testing.T) {
    userGroupList := []*models.UserGroup{
        {
            GroupName:   "sample1",
            GroupType:   1,
            LdapGroupDN: "cn=sample1_users,ou=groups,dc=example,dc=com",
        },
        {
            GroupName:   "sample2",
            GroupType:   1,
            LdapGroupDN: "cn=sample2_users,ou=groups,dc=example,dc=com",
        },
        {
            GroupName:   "sample3",
            GroupType:   0,
            LdapGroupDN: "cn=sample3_users,ou=groups,dc=example,dc=com",
        },
    }

    groupQueryConditions := GetGroupDNQueryCondition(userGroupList)
    expectedConditions := `'cn=sample1_users,ou=groups,dc=example,dc=com','cn=sample2_users,ou=groups,dc=example,dc=com'`
    if groupQueryConditions != expectedConditions {
        t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", expectedConditions, groupQueryConditions)
    }
    var userGroupList2 []*models.UserGroup
    groupQueryCondition2 := GetGroupDNQueryCondition(userGroupList2)
    if len(groupQueryCondition2) > 0 {
        t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition2)
    }
    groupQueryCondition3 := GetGroupDNQueryCondition(nil)
    if len(groupQueryCondition3) > 0 {
        t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition3)
    }
}
func TestGetGroupProjects(t *testing.T) {
    userID, err := dao.Register(models.User{
        Username: "grouptestu09",
        Email:    "grouptest09@example.com",
        Password: "Harbor123456",
    })
    defer dao.DeleteUser(int(userID))
    projectID1, err := dao.AddProject(models.Project{
        Name:    "grouptest01",
        OwnerID: 1,
@ -307,7 +283,7 @@ func TestGetGroupProjects(t *testing.T) {
    }
    defer dao.DeleteProject(projectID2)
    groupID, err := AddUserGroup(models.UserGroup{
        GroupName:   "test_group_01",
        GroupName:   "test_group_03",
        GroupType:   1,
        LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
    })
@ -322,8 +298,7 @@ func TestGetGroupProjects(t *testing.T) {
    })
    defer project.DeleteProjectMemberByID(pmid)
    type args struct {
        groupDNCondition string
        query            *models.ProjectQueryParam
        query *models.ProjectQueryParam
    }
    member := &models.MemberQuery{
        Name: "grouptestu09",
@ -335,19 +310,17 @@ func TestGetGroupProjects(t *testing.T) {
        wantErr  bool
    }{
        {"Query with group DN",
            args{"'cn=harbor_users,ou=groups,dc=example,dc=com'",
            args{&models.ProjectQueryParam{
                &models.ProjectQueryParam{
                Member: member,
                    Member: member,
                }},
            }},
            1, false},
        {"Query without group DN",
            args{"",
            args{&models.ProjectQueryParam{}},
                &models.ProjectQueryParam{}},
            1, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := dao.GetGroupProjects(tt.args.groupDNCondition, tt.args.query)
            got, err := dao.GetGroupProjects([]int{groupID}, tt.args.query)
            if (err != nil) != tt.wantErr {
                t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
                return
@ -377,7 +350,7 @@ func TestGetTotalGroupProjects(t *testing.T) {
    }
    defer dao.DeleteProject(projectID2)
    groupID, err := AddUserGroup(models.UserGroup{
        GroupName:   "test_group_01",
        GroupName:   "test_group_05",
        GroupType:   1,
        LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
    })
@ -392,8 +365,7 @@ func TestGetTotalGroupProjects(t *testing.T) {
    })
    defer project.DeleteProjectMemberByID(pmid)
    type args struct {
        groupDNCondition string
        query            *models.ProjectQueryParam
        query *models.ProjectQueryParam
    }
    tests := []struct {
        name string
@ -401,18 +373,16 @@ func TestGetTotalGroupProjects(t *testing.T) {
        wantSize int
        wantErr  bool
    }{
        {"Query with group DN",
        {"Query with group ID",
            args{"'cn=harbor_users,ou=groups,dc=example,dc=com'",
            args{&models.ProjectQueryParam{}},
                &models.ProjectQueryParam{}},
            1, false},
        {"Query without group DN",
        {"Query without group ID",
            args{"",
            args{&models.ProjectQueryParam{}},
                &models.ProjectQueryParam{}},
            1, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := dao.GetTotalGroupProjects(tt.args.groupDNCondition, tt.args.query)
            got, err := dao.GetTotalGroupProjects([]int{groupID}, tt.args.query)
            if (err != nil) != tt.wantErr {
                t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
                return
@ -423,3 +393,90 @@ func TestGetTotalGroupProjects(t *testing.T) {
        })
    }
}

func TestGetRolesByLDAPGroup(t *testing.T) {

    userGroupList, err := QueryUserGroup(models.UserGroup{LdapGroupDN: "cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com", GroupType: 1})
    if err != nil || len(userGroupList) < 1 {
        t.Errorf("failed to query user group, err %v", err)
    }
    gl2, err2 := GetGroupIDByGroupName([]string{"test_http_group", "test_myhttp_group"}, common.HTTPGroupType)
    if err2 != nil || len(gl2) != 2 {
        t.Errorf("failed to query http user group, err %v", err)
    }
    project, err := dao.GetProjectByName("member_test_01")
    if err != nil {
        t.Errorf("Error occurred when Get project by name: %v", err)
    }
    privateProject, err := dao.GetProjectByName("group_project_private")
    if err != nil {
        t.Errorf("Error occurred when Get project by name: %v", err)
    }

    type args struct {
        projectID int64
        groupIDs  []int
    }
    tests := []struct {
        name     string
        args     args
        wantSize int
        wantErr  bool
    }{
        {"Check normal", args{projectID: project.ProjectID, groupIDs: []int{userGroupList[0].ID, gl2[0], gl2[1]}}, 2, false},
        {"Check non exist", args{projectID: privateProject.ProjectID, groupIDs: []int{9999}}, 0, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := dao.GetRolesByGroupID(tt.args.projectID, tt.args.groupIDs)
            if (err != nil) != tt.wantErr {
                t.Errorf("TestGetRolesByLDAPGroup() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if len(got) != tt.wantSize {
                t.Errorf("TestGetRolesByLDAPGroup() = %v, want %v", len(got), tt.wantSize)
            }
        })
    }
}

func TestGetGroupIDByGroupName(t *testing.T) {
    groupList, err := QueryUserGroup(models.UserGroup{GroupName: "test_http_group", GroupType: 2})
    if err != nil {
        t.Error(err)
    }
    if len(groupList) < 1 {
        t.Error(err)
    }
    groupList2, err := QueryUserGroup(models.UserGroup{GroupName: "test_myhttp_group", GroupType: 2})
    if err != nil {
        t.Error(err)
    }
    if len(groupList2) < 1 {
        t.Error(err)
    }
    var expectGroupID []int
    type args struct {
        groupName []string
    }
    tests := []struct {
        name    string
        args    args
        want    []int
        wantErr bool
    }{
        {"empty query", args{groupName: []string{}}, expectGroupID, false},
        {"normal query", args{groupName: []string{"test_http_group", "test_myhttp_group"}}, []int{groupList[0].ID, groupList2[0].ID}, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := GetGroupIDByGroupName(tt.args.groupName, common.HTTPGroupType)
            if (err != nil) != tt.wantErr {
                t.Errorf("GetHTTPGroupIDByGroupName() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !reflect.DeepEqual(got, tt.want) {
                t.Errorf("GetHTTPGroupIDByGroupName() = %#v, want %#v", got, tt.want)
            }
        })
    }
}
122
src/common/dao/notification/notification_job.go
Executable file
122
src/common/dao/notification/notification_job.go
Executable file
@ -0,0 +1,122 @@
package notification

import (
	"fmt"

	"github.com/astaxie/beego/orm"
	"github.com/goharbor/harbor/src/common/dao"
	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/common/utils/log"
	"github.com/pkg/errors"
)

// UpdateNotificationJob updates the given notification job; props limits the columns to update
func UpdateNotificationJob(job *models.NotificationJob, props ...string) (int64, error) {
	if job == nil {
		return 0, errors.New("nil job")
	}

	if job.ID == 0 {
		return 0, fmt.Errorf("notification job ID is empty")
	}

	o := dao.GetOrmer()
	return o.Update(job, props...)
}

// AddNotificationJob inserts a new notification job into the DB
func AddNotificationJob(job *models.NotificationJob) (int64, error) {
	if job == nil {
		return 0, errors.New("nil job")
	}
	o := dao.GetOrmer()
	if len(job.Status) == 0 {
		job.Status = models.JobPending
	}
	return o.Insert(job)
}

// GetNotificationJob returns the notification job with the given ID, or nil when no row matches
func GetNotificationJob(id int64) (*models.NotificationJob, error) {
	o := dao.GetOrmer()
	j := &models.NotificationJob{
		ID: id,
	}
	err := o.Read(j)
	if err == orm.ErrNoRows {
		return nil, nil
	}
	// propagate read errors instead of silently dropping them
	return j, err
}

// GetTotalCountOfNotificationJobs returns the count of notification jobs matching the query
func GetTotalCountOfNotificationJobs(query ...*models.NotificationJobQuery) (int64, error) {
	qs := notificationJobQueryConditions(query...)
	return qs.Count()
}

// GetNotificationJobs returns the notification jobs matching the query, most recently updated first
func GetNotificationJobs(query ...*models.NotificationJobQuery) ([]*models.NotificationJob, error) {
	var jobs []*models.NotificationJob

	qs := notificationJobQueryConditions(query...)
	if len(query) > 0 && query[0] != nil {
		qs = dao.PaginateForQuerySetter(qs, query[0].Page, query[0].Size)
	}

	qs = qs.OrderBy("-UpdateTime")

	_, err := qs.All(&jobs)
	return jobs, err
}

// GetLastTriggerJobsGroupByEventType gets the notification jobs of a policy, keeping only the
// last triggered (created) job per event type
func GetLastTriggerJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) {
	o := dao.GetOrmer()
	// postgres "distinct on" usage reference:
	// https://stackoverflow.com/questions/13325583/postgresql-max-and-group-by
	sql := `select distinct on (event_type) event_type, id, creation_time, status, notify_type, job_uuid,
		update_time, job_detail from notification_job where policy_id = ?
		order by event_type, id desc`

	jobs := []*models.NotificationJob{}
	_, err := o.Raw(sql, policyID).QueryRows(&jobs)
	if err != nil {
		log.Errorf("query last trigger info group by event type failed: %v", err)
		return nil, err
	}

	return jobs, nil
}

// DeleteNotificationJob deletes the notification job with the given ID
func DeleteNotificationJob(id int64) error {
	o := dao.GetOrmer()
	_, err := o.Delete(&models.NotificationJob{ID: id})
	return err
}

// DeleteAllNotificationJobsByPolicyID deletes all notification jobs of the given policy
func DeleteAllNotificationJobsByPolicyID(policyID int64) (int64, error) {
	o := dao.GetOrmer()
	return o.Delete(&models.NotificationJob{PolicyID: policyID}, "policy_id")
}

func notificationJobQueryConditions(query ...*models.NotificationJobQuery) orm.QuerySeter {
	qs := dao.GetOrmer().QueryTable(&models.NotificationJob{})
	if len(query) == 0 || query[0] == nil {
		return qs
	}

	q := query[0]
	if q.PolicyID != 0 {
		qs = qs.Filter("PolicyID", q.PolicyID)
	}
	if len(q.Statuses) > 0 {
		qs = qs.Filter("Status__in", q.Statuses)
	}
	if len(q.EventTypes) > 0 {
		qs = qs.Filter("EventType__in", q.EventTypes)
	}
	return qs
}
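
The query helpers above compose through models.NotificationJobQuery. A minimal usage sketch, not part of this commit, of paging through a policy's pending jobs; it assumes a database has already been registered through the dao package:

package main

import (
	"fmt"

	notification "github.com/goharbor/harbor/src/common/dao/notification"
	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	// First page of the pending jobs of policy 111, ten rows per page.
	query := &models.NotificationJobQuery{
		PolicyID: 111,
		Statuses: []string{"pending"},
		Page:     1,
		Size:     10,
	}
	jobs, err := notification.GetNotificationJobs(query)
	if err != nil {
		panic(err)
	}
	for _, j := range jobs {
		fmt.Println(j.ID, j.EventType, j.Status)
	}
}
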
263
src/common/dao/notification/notification_job_test.go
Normal file
@ -0,0 +1,263 @@
package notification

import (
	"testing"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	testJob1 = &models.NotificationJob{
		PolicyID:   1111,
		EventType:  "pushImage",
		NotifyType: "http",
		Status:     "pending",
		JobDetail:  "{\"type\":\"pushImage\",\"occur_at\":1563536782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
		UUID:       "00000000",
	}
	testJob2 = &models.NotificationJob{
		PolicyID:   111,
		EventType:  "pullImage",
		NotifyType: "http",
		Status:     "",
		JobDetail:  "{\"type\":\"pushImage\",\"occur_at\":1563537782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
		UUID:       "00000000",
	}
	testJob3 = &models.NotificationJob{
		PolicyID:   111,
		EventType:  "deleteImage",
		NotifyType: "http",
		Status:     "pending",
		JobDetail:  "{\"type\":\"pushImage\",\"occur_at\":1563538782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
		UUID:       "00000000",
	}
)

func TestAddNotificationJob(t *testing.T) {
	tests := []struct {
		name    string
		job     *models.NotificationJob
		want    int64
		wantErr bool
	}{
		{name: "AddNotificationJob nil", job: nil, wantErr: true},
		{name: "AddNotificationJob 1", job: testJob1, want: 1},
		{name: "AddNotificationJob 2", job: testJob2, want: 2},
		{name: "AddNotificationJob 3", job: testJob3, want: 3},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := AddNotificationJob(tt.job)
			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}
			require.Nil(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestGetTotalCountOfNotificationJobs(t *testing.T) {
	type args struct {
		query *models.NotificationJobQuery
	}
	tests := []struct {
		name    string
		args    args
		want    int64
		wantErr bool
	}{
		{
			name: "GetTotalCountOfNotificationJobs 1",
			args: args{
				query: &models.NotificationJobQuery{
					PolicyID: 111,
				},
			},
			want: 2,
		},
		{
			name: "GetTotalCountOfNotificationJobs 2",
			args: args{},
			want: 3,
		},
		{
			name: "GetTotalCountOfNotificationJobs 3",
			args: args{
				query: &models.NotificationJobQuery{
					Statuses: []string{"pending"},
				},
			},
			want: 3,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetTotalCountOfNotificationJobs(tt.args.query)
			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}
			require.Nil(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestGetLastTriggerJobsGroupByEventType(t *testing.T) {
	type args struct {
		policyID int64
	}
	tests := []struct {
		name    string
		args    args
		want    []*models.NotificationJob
		wantErr bool
	}{
		{
			name: "GetLastTriggerJobsGroupByEventType",
			args: args{
				policyID: 111,
			},
			want: []*models.NotificationJob{
				testJob2,
				testJob3,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetLastTriggerJobsGroupByEventType(tt.args.policyID)
			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}
			require.Nil(t, err)
			assert.Equal(t, len(tt.want), len(got))
		})
	}
}

func TestUpdateNotificationJob(t *testing.T) {
	type args struct {
		job   *models.NotificationJob
		props []string
	}
	tests := []struct {
		name    string
		args    args
		want    int64
		wantErr bool
	}{
		{name: "UpdateNotificationJob Want Error 1", args: args{job: nil}, wantErr: true},
		{name: "UpdateNotificationJob Want Error 2", args: args{job: &models.NotificationJob{ID: 0}}, wantErr: true},
		{
			name: "UpdateNotificationJob 1",
			args: args{
				job:   &models.NotificationJob{ID: 1, UUID: "111111111111111"},
				props: []string{"UUID"},
			},
		},
		{
			name: "UpdateNotificationJob 2",
			args: args{
				job:   &models.NotificationJob{ID: 2, UUID: "222222222222222"},
				props: []string{"UUID"},
			},
		},
		{
			name: "UpdateNotificationJob 3",
			args: args{
				job:   &models.NotificationJob{ID: 3, UUID: "333333333333333"},
				props: []string{"UUID"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := UpdateNotificationJob(tt.args.job, tt.args.props...)
			if tt.wantErr {
				require.NotNil(t, err, "Error: %s", err)
				return
			}

			require.Nil(t, err)
			gotJob, err := GetNotificationJob(tt.args.job.ID)

			require.Nil(t, err)
			assert.Equal(t, tt.args.job.UUID, gotJob.UUID)
		})
	}
}

func TestDeleteNotificationJob(t *testing.T) {
	type args struct {
		id int64
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{name: "DeleteNotificationJob 1", args: args{id: 1}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := DeleteNotificationJob(tt.args.id)

			if tt.wantErr {
				require.NotNil(t, err, "Error: %s", err)
				return
			}

			require.Nil(t, err)
			job, err := GetNotificationJob(tt.args.id)

			require.Nil(t, err)
			assert.Nil(t, job)
		})
	}
}

func TestDeleteAllNotificationJobs(t *testing.T) {
	type args struct {
		policyID int64
		query    []*models.NotificationJobQuery
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "DeleteAllNotificationJobs 1",
			args: args{
				policyID: 111,
				query: []*models.NotificationJobQuery{
					{PolicyID: 111},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := DeleteAllNotificationJobsByPolicyID(tt.args.policyID)

			if tt.wantErr {
				require.NotNil(t, err, "Error: %s", err)
				return
			}

			require.Nil(t, err)
			jobs, err := GetNotificationJobs(tt.args.query...)

			require.Nil(t, err)
			assert.Equal(t, 0, len(jobs))
		})
	}
}

69
src/common/dao/notification/notification_policy.go
Executable file
@ -0,0 +1,69 @@
package notification

import (
	"github.com/astaxie/beego/orm"
	"github.com/goharbor/harbor/src/common/dao"
	"github.com/goharbor/harbor/src/common/models"
	"github.com/pkg/errors"
)

// GetNotificationPolicy returns the notification policy with the given ID, or nil when no row matches
func GetNotificationPolicy(id int64) (*models.NotificationPolicy, error) {
	policy := new(models.NotificationPolicy)
	o := dao.GetOrmer()
	err := o.QueryTable(policy).Filter("id", id).One(policy)
	if err == orm.ErrNoRows {
		return nil, nil
	}
	return policy, err
}

// GetNotificationPolicyByName returns the notification policy with the given name within the project
func GetNotificationPolicyByName(name string, projectID int64) (*models.NotificationPolicy, error) {
	policy := new(models.NotificationPolicy)
	o := dao.GetOrmer()
	err := o.QueryTable(policy).Filter("name", name).Filter("projectID", projectID).One(policy)
	if err == orm.ErrNoRows {
		return nil, nil
	}
	return policy, err
}

// GetNotificationPolicies returns all notification policies in the project
func GetNotificationPolicies(projectID int64) ([]*models.NotificationPolicy, error) {
	var policies []*models.NotificationPolicy
	qs := dao.GetOrmer().QueryTable(new(models.NotificationPolicy)).Filter("ProjectID", projectID)

	_, err := qs.All(&policies)
	if err != nil {
		return nil, err
	}
	return policies, nil
}

// AddNotificationPolicy inserts a new notification policy into the DB
func AddNotificationPolicy(policy *models.NotificationPolicy) (int64, error) {
	if policy == nil {
		return 0, errors.New("nil policy")
	}
	o := dao.GetOrmer()
	return o.Insert(policy)
}

// UpdateNotificationPolicy updates the specified notification policy
func UpdateNotificationPolicy(policy *models.NotificationPolicy) error {
	if policy == nil {
		return errors.New("nil policy")
	}
	o := dao.GetOrmer()
	_, err := o.Update(policy)
	return err
}

// DeleteNotificationPolicy deletes the notification policy with the given ID
func DeleteNotificationPolicy(id int64) error {
	o := dao.GetOrmer()
	_, err := o.Delete(&models.NotificationPolicy{ID: id})
	return err
}
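
A minimal lifecycle sketch for the policy DAO above; illustrative only and not part of the commit (the policy name is hypothetical, and a registered database is assumed):

package main

import (
	"fmt"

	notification "github.com/goharbor/harbor/src/common/dao/notification"
	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	// Create a policy, look it up by name within its project, rename it, then delete it.
	id, err := notification.AddNotificationPolicy(&models.NotificationPolicy{
		Name:      "ci-webhook", // hypothetical policy name
		ProjectID: 1,
	})
	if err != nil {
		panic(err)
	}

	p, err := notification.GetNotificationPolicyByName("ci-webhook", 1)
	if err != nil || p == nil {
		panic("policy not found")
	}
	fmt.Println(p.ID == id) // true

	p.Name = "ci-webhook-renamed"
	if err := notification.UpdateNotificationPolicy(p); err != nil {
		panic(err)
	}
	if err := notification.DeleteNotificationPolicy(id); err != nil {
		panic(err)
	}
}
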
291
src/common/dao/notification/notification_policy_test.go
Normal file
@ -0,0 +1,291 @@
package notification

import (
	"testing"
	"time"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	testPly1 = &models.NotificationPolicy{
		Name:         "webhook test policy1",
		Description:  "webhook test policy1 description",
		ProjectID:    111,
		TargetsDB:    "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
		EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
		Creator:      "no one",
		CreationTime: time.Now(),
		UpdateTime:   time.Now(),
		Enabled:      true,
	}
	testPly2 = &models.NotificationPolicy{
		Name:         "webhook test policy2",
		Description:  "webhook test policy2 description",
		ProjectID:    222,
		TargetsDB:    "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
		EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
		Creator:      "no one",
		CreationTime: time.Now(),
		UpdateTime:   time.Now(),
		Enabled:      true,
	}
	testPly3 = &models.NotificationPolicy{
		Name:         "webhook test policy3",
		Description:  "webhook test policy3 description",
		ProjectID:    333,
		TargetsDB:    "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
		EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
		Creator:      "no one",
		CreationTime: time.Now(),
		UpdateTime:   time.Now(),
		Enabled:      true,
	}
)

func TestAddNotificationPolicy(t *testing.T) {
	tests := []struct {
		name    string
		policy  *models.NotificationPolicy
		want    int64
		wantErr bool
	}{
		{name: "AddNotificationPolicy nil", policy: nil, wantErr: true},
		{name: "AddNotificationPolicy 1", policy: testPly1, want: 1},
		{name: "AddNotificationPolicy 2", policy: testPly2, want: 2},
		{name: "AddNotificationPolicy 3", policy: testPly3, want: 3},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := AddNotificationPolicy(tt.policy)

			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}
			require.Nil(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}

func TestGetNotificationPolicies(t *testing.T) {
	tests := []struct {
		name         string
		projectID    int64
		wantPolicies []*models.NotificationPolicy
		wantErr      bool
	}{
		{name: "GetNotificationPolicies nil", projectID: 0, wantPolicies: []*models.NotificationPolicy{}},
		{name: "GetNotificationPolicies 1", projectID: 111, wantPolicies: []*models.NotificationPolicy{testPly1}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotPolicies, err := GetNotificationPolicies(tt.projectID)
			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}

			require.Nil(t, err)
			for i, gotPolicy := range gotPolicies {
				assert.Equal(t, tt.wantPolicies[i].Name, gotPolicy.Name)
				assert.Equal(t, tt.wantPolicies[i].ID, gotPolicy.ID)
				assert.Equal(t, tt.wantPolicies[i].EventTypesDB, gotPolicy.EventTypesDB)
				assert.Equal(t, tt.wantPolicies[i].TargetsDB, gotPolicy.TargetsDB)
				assert.Equal(t, tt.wantPolicies[i].Creator, gotPolicy.Creator)
				assert.Equal(t, tt.wantPolicies[i].Enabled, gotPolicy.Enabled)
				assert.Equal(t, tt.wantPolicies[i].Description, gotPolicy.Description)
			}
		})
	}
}

func TestGetNotificationPolicy(t *testing.T) {
	tests := []struct {
		name       string
		id         int64
		wantPolicy *models.NotificationPolicy
		wantErr    bool
	}{
		{name: "GetNotificationPolicy 1", id: 1, wantPolicy: testPly1},
		{name: "GetNotificationPolicy 2", id: 2, wantPolicy: testPly2},
		{name: "GetNotificationPolicy 3", id: 3, wantPolicy: testPly3},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotPolicy, err := GetNotificationPolicy(tt.id)
			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}
			require.Nil(t, err)
			assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name)
			assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID)
			assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB)
			assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB)
			assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator)
			assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled)
			assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description)
		})
	}
}

func TestGetNotificationPolicyByName(t *testing.T) {
	type args struct {
		name      string
		projectID int64
	}
	tests := []struct {
		name       string
		args       args
		wantPolicy *models.NotificationPolicy
		wantErr    bool
	}{
		{name: "GetNotificationPolicyByName 1", args: args{name: testPly1.Name, projectID: testPly1.ProjectID}, wantPolicy: testPly1},
		{name: "GetNotificationPolicyByName 2", args: args{name: testPly2.Name, projectID: testPly2.ProjectID}, wantPolicy: testPly2},
		{name: "GetNotificationPolicyByName 3", args: args{name: testPly3.Name, projectID: testPly3.ProjectID}, wantPolicy: testPly3},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotPolicy, err := GetNotificationPolicyByName(tt.args.name, tt.args.projectID)
			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}
			require.Nil(t, err)
			assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name)
			assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID)
			assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB)
			assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB)
			assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator)
			assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled)
			assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description)
		})
	}
}

func TestUpdateNotificationPolicy(t *testing.T) {
	type args struct {
		policy *models.NotificationPolicy
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "UpdateNotificationPolicy nil",
			args: args{
				policy: nil,
			},
			wantErr: true,
		},
		{
			name: "UpdateNotificationPolicy 1",
			args: args{
				policy: &models.NotificationPolicy{
					ID:           1,
					Name:         "webhook test policy1 new",
					Description:  "webhook test policy1 description new",
					ProjectID:    111,
					TargetsDB:    "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
					EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
					Creator:      "no one",
					CreationTime: time.Now(),
					UpdateTime:   time.Now(),
					Enabled:      true,
				},
			},
		},
		{
			name: "UpdateNotificationPolicy 2",
			args: args{
				policy: &models.NotificationPolicy{
					ID:           2,
					Name:         "webhook test policy2 new",
					Description:  "webhook test policy2 description new",
					ProjectID:    222,
					TargetsDB:    "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
					EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
					Creator:      "no one",
					CreationTime: time.Now(),
					UpdateTime:   time.Now(),
					Enabled:      true,
				},
			},
		},
		{
			name: "UpdateNotificationPolicy 3",
			args: args{
				policy: &models.NotificationPolicy{
					ID:           3,
					Name:         "webhook test policy3 new",
					Description:  "webhook test policy3 description new",
					ProjectID:    333,
					TargetsDB:    "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
					EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
					Creator:      "no one",
					CreationTime: time.Now(),
					UpdateTime:   time.Now(),
					Enabled:      true,
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := UpdateNotificationPolicy(tt.args.policy)

			if tt.wantErr {
				require.NotNil(t, err, "Error: %s", err)
				return
			}

			require.Nil(t, err)
			gotPolicy, err := GetNotificationPolicy(tt.args.policy.ID)

			require.Nil(t, err)
			assert.Equal(t, tt.args.policy.Description, gotPolicy.Description)
			assert.Equal(t, tt.args.policy.Name, gotPolicy.Name)
		})
	}
}

func TestDeleteNotificationPolicy(t *testing.T) {
	tests := []struct {
		name    string
		id      int64
		wantErr bool
	}{
		{name: "DeleteNotificationPolicy 1", id: 1, wantErr: false},
		{name: "DeleteNotificationPolicy 2", id: 2, wantErr: false},
		{name: "DeleteNotificationPolicy 3", id: 3, wantErr: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := DeleteNotificationPolicy(tt.id)
			if tt.wantErr {
				require.NotNil(t, err, "wantErr: %s", err)
				return
			}
			require.Nil(t, err)
			policy, err := GetNotificationPolicy(tt.id)
			require.Nil(t, err)
			assert.Nil(t, policy)
		})
	}
}

13
src/common/dao/notification/notification_test.go
Normal file
@ -0,0 +1,13 @@
package notification

import (
	"os"
	"testing"

	"github.com/goharbor/harbor/src/common/dao"
)

func TestMain(m *testing.M) {
	dao.PrepareTestForPostgresSQL()
	os.Exit(m.Run())
}

@ -31,12 +31,14 @@ import (
 const defaultMigrationPath = "migrations/postgresql/"
 
 type pgsql struct {
 	host     string
 	port     string
 	usr      string
 	pwd      string
 	database string
 	sslmode  string
+	maxIdleConns int
+	maxOpenConns int
 }
 
 // Name returns the name of PostgreSQL
@ -51,17 +53,19 @@ func (p *pgsql) String() string {
 }
 
 // NewPGSQL returns an instance of postgres
-func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string) Database {
+func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database {
 	if len(sslmode) == 0 {
 		sslmode = "disable"
 	}
 	return &pgsql{
 		host:     host,
 		port:     port,
 		usr:      usr,
 		pwd:      pwd,
 		database: database,
 		sslmode:  sslmode,
+		maxIdleConns: maxIdleConns,
+		maxOpenConns: maxOpenConns,
 	}
 }
 
@ -82,7 +86,7 @@ func (p *pgsql) Register(alias ...string) error {
 	info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
 		p.host, p.port, p.usr, p.pwd, p.database, p.sslmode)
 
-	return orm.RegisterDataBase(an, "postgres", info)
+	return orm.RegisterDataBase(an, "postgres", info, p.maxIdleConns, p.maxOpenConns)
 }
 
 // UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.
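
The two new parameters plumb database/sql connection-pool limits through beego's RegisterDataBase. A sketch of a call with the new signature; the values and the dao import path are assumptions, not taken from this commit:

package main

import "github.com/goharbor/harbor/src/common/dao"

func main() {
	// Keep 2 idle connections; a max-open value of 0 means unlimited in database/sql.
	db := dao.NewPGSQL("127.0.0.1", "5432", "postgres", "root123", "registry", "disable", 2, 0)
	if err := db.Register(); err != nil {
		panic(err)
	}
}
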
@ -44,7 +44,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error {
 	params = append(params, projectID)
 
 	if len(name) > 0 {
-		sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
+		sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
 		params = append(params, name)
 	}
 
@ -74,7 +74,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
 	params = append(params, projectID)
 
 	if len(name) > 0 {
-		sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
+		sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
 		params = append(params, name)
 	}
 
@ -82,7 +82,9 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
 	return proMetas, err
 }
 
-func paramPlaceholder(n int) string {
+// ParamPlaceholderForIn returns a string that contains placeholders for the sql keyword "in",
+// e.g. n=3 returns "?,?,?"
+func ParamPlaceholderForIn(n int) string {
 	placeholders := []string{}
 	for i := 0; i < n; i++ {
 		placeholders = append(placeholders, "?")
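
The rename exports the helper for use outside this file; per its new doc comment, it expands to comma-separated "?" placeholders. A quick illustration:

package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/dao"
)

func main() {
	// Build an IN clause with three positional placeholders.
	sql := fmt.Sprintf(`select * from project where project_id in ( %s )`, dao.ParamPlaceholderForIn(3))
	fmt.Println(sql) // select * from project where project_id in ( ?,?,? )
}
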
@ -156,19 +156,21 @@ func GetProjects(query *models.ProjectQueryParam) ([]*models.Project, error) {
 
 // GetGroupProjects - Get all projects of a user, including projects where the user is a direct member
 // and projects where one of the user's groups is a group member.
-func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) ([]*models.Project, error) {
+func GetGroupProjects(groupIDs []int, query *models.ProjectQueryParam) ([]*models.Project, error) {
 	sql, params := projectQueryConditions(query)
 	sql = `select distinct p.project_id, p.name, p.owner_id,
 		p.creation_time, p.update_time ` + sql
-	if len(groupDNCondition) > 0 {
+	groupIDCondition := JoinNumberConditions(groupIDs)
+	if len(groupIDs) > 0 {
 		sql = fmt.Sprintf(
 			`%s union select distinct p.project_id, p.name, p.owner_id, p.creation_time, p.update_time
 			from project p
 			left join project_member pm on p.project_id = pm.project_id
-			left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1
-			where ug.ldap_group_dn in ( %s ) order by name`,
-			sql, groupDNCondition)
+			left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
+			where ug.id in ( %s )`,
+			sql, groupIDCondition)
 	}
+	sql = sql + ` order by name`
 	sqlStr, queryParams := CreatePagination(query, sql, params)
 	log.Debugf("query sql:%v", sql)
 	var projects []*models.Project
@ -178,10 +180,11 @@ func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam)
 
 // GetTotalGroupProjects - Get the total count of such projects, i.e. projects where the user is a
 // direct member or one of the user's groups is a group member.
-func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) (int, error) {
+func GetTotalGroupProjects(groupIDs []int, query *models.ProjectQueryParam) (int, error) {
 	var sql string
 	sqlCondition, params := projectQueryConditions(query)
-	if len(groupDNCondition) == 0 {
+	groupIDCondition := JoinNumberConditions(groupIDs)
+	if len(groupIDs) == 0 {
 		sql = `select count(1) ` + sqlCondition
 	} else {
 		sql = fmt.Sprintf(
@ -189,9 +192,9 @@ func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryPa
 			from ( select p.project_id %s union select p.project_id
 			from project p
 			left join project_member pm on p.project_id = pm.project_id
-			left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1
-			where ug.ldap_group_dn in ( %s )) t`,
-			sqlCondition, groupDNCondition)
+			left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
+			where ug.id in ( %s )) t`,
+			sqlCondition, groupIDCondition)
 	}
 	log.Debugf("query sql:%v", sql)
 	var count int
@ -257,7 +260,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
 	}
 	if len(query.ProjectIDs) > 0 {
 		sql += fmt.Sprintf(` and p.project_id in ( %s )`,
-			paramPlaceholder(len(query.ProjectIDs)))
+			ParamPlaceholderForIn(len(query.ProjectIDs)))
 		params = append(params, query.ProjectIDs)
 	}
 	return sql, params
@ -291,29 +294,24 @@ func DeleteProject(id int64) error {
 	return err
 }
 
-// GetRolesByLDAPGroup - Get Project roles of the
-// specified group DN is a member of current project
-func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error) {
+// GetRolesByGroupID - Get the project roles granted to the
+// specified groups in the current project
+func GetRolesByGroupID(projectID int64, groupIDs []int) ([]int, error) {
 	var roles []int
-	if len(groupDNCondition) == 0 {
+	if len(groupIDs) == 0 {
 		return roles, nil
 	}
+	groupIDCondition := JoinNumberConditions(groupIDs)
 	o := GetOrmer()
-	// Because an LDAP user can be memberof multiple groups,
-	// the role is in descent order (1-admin, 2-developer, 3-guest, 4-master), use min to select the max privilege role.
 	sql := fmt.Sprintf(
-		`select min(pm.role) from project_member pm
+		`select distinct pm.role from project_member pm
 		left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id
-		where ug.ldap_group_dn in ( %s ) and pm.project_id = ? `,
-		groupDNCondition)
-	log.Debugf("sql:%v", sql)
+		where ug.id in ( %s ) and pm.project_id = ?`,
+		groupIDCondition)
+	log.Debugf("sql for GetRolesByGroupID(project ID: %d, group ids: %v):%v", projectID, groupIDs, sql)
 	if _, err := o.Raw(sql, projectID).QueryRows(&roles); err != nil {
-		log.Warningf("Error in GetRolesByLDAPGroup, error: %v", err)
+		log.Warningf("Error in GetRolesByGroupID, error: %v", err)
 		return nil, err
 	}
-	// If there is no row selected, the min returns an empty row, to avoid return 0 as role
-	if len(roles) == 1 && roles[0] == 0 {
-		return []int{}, nil
-	}
 	return roles, nil
 }
@ -30,13 +30,13 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) {
 	}
 
 	o := dao.GetOrmer()
 	sql := ` select a.* from (select pm.id as id, pm.project_id as project_id, ug.id as entity_id, ug.group_name as entity_name, ug.creation_time, ug.update_time, r.name as rolename,
 		r.role_id as role, pm.entity_type as entity_type from user_group ug join project_member pm
 		on pm.project_id = ? and ug.id = pm.entity_id join role r on pm.role = r.role_id where pm.entity_type = 'g'
 		union
 		select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename,
 		r.role_id as role, pm.entity_type as entity_type from harbor_user u join project_member pm
 		on pm.project_id = ? and u.user_id = pm.entity_id
 		join role r on pm.role = r.role_id where u.deleted = false and pm.entity_type = 'u') as a where a.project_id = ? `
 
 	queryParam := make([]interface{}, 1)
@ -70,6 +70,27 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) {
 	return members, err
 }
 
+// GetTotalOfProjectMembers returns the total count of members of a project
+func GetTotalOfProjectMembers(projectID int64, roles ...int) (int64, error) {
+	log.Debugf("Query condition %+v", projectID)
+	if projectID == 0 {
+		return 0, fmt.Errorf("failed to get total of project members, project id required %v", projectID)
+	}
+
+	sql := "SELECT COUNT(1) FROM project_member WHERE project_id = ?"
+
+	queryParam := []interface{}{projectID}
+
+	if len(roles) > 0 {
+		sql += " AND role = ?"
+		queryParam = append(queryParam, roles[0])
+	}
+
+	var count int64
+	err := dao.GetOrmer().Raw(sql, queryParam).QueryRow(&count)
+	return count, err
+}
+
 // AddProjectMember inserts a record to table project_member
 func AddProjectMember(member models.Member) (int, error) {
 
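
A sketch of calling the new counter; the member DAO's import path and package name are assumed from Harbor's layout, not shown in this diff:

package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common"
	"github.com/goharbor/harbor/src/common/dao/project"
)

func main() {
	// Count only the project admins of project 1; omit the role argument to count all members.
	total, err := project.GetTotalOfProjectMembers(1, common.RoleProjectAdmin)
	if err != nil {
		panic(err)
	}
	fmt.Println(total)
}
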
@ -120,23 +141,23 @@ func DeleteProjectMemberByID(pmid int) error {
 // SearchMemberByName search members of the project by entity_name
 func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, error) {
 	o := dao.GetOrmer()
 	sql := `select pm.id, pm.project_id,
 		u.username as entity_name,
 		r.name as rolename,
 		pm.role, pm.entity_id, pm.entity_type
 		from project_member pm
 		left join harbor_user u on pm.entity_id = u.user_id and pm.entity_type = 'u'
 		left join role r on pm.role = r.role_id
 		where u.deleted = false and pm.project_id = ? and u.username like ?
 		union
 		select pm.id, pm.project_id,
 		ug.group_name as entity_name,
 		r.name as rolename,
 		pm.role, pm.entity_id, pm.entity_type
 		from project_member pm
 		left join user_group ug on pm.entity_id = ug.id and pm.entity_type = 'g'
 		left join role r on pm.role = r.role_id
 		where pm.project_id = ? and ug.group_name like ?
 		order by entity_name `
 	queryParam := make([]interface{}, 4)
 	queryParam = append(queryParam, projectID)
@ -148,16 +169,3 @@ func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, e
 	_, err := o.Raw(sql, queryParam).QueryRows(&members)
 	return members, err
 }
-
-// GetRolesByGroup -- Query group roles
-func GetRolesByGroup(projectID int64, groupDNCondition string) []int {
-	var roles []int
-	o := dao.GetOrmer()
-	sql := `select role from project_member pm
-		left join user_group ug on pm.project_id = ?
-		where ug.group_type = 1 and ug.ldap_group_dn in (` + groupDNCondition + `)`
-	if _, err := o.Raw(sql, projectID).QueryRows(&roles); err != nil {
-		return roles
-	}
-	return roles
-}
@ -51,11 +51,18 @@ func TestMain(m *testing.M) {
 		"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
 		"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
 		"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
+
+		"insert into harbor_user (username, email, password, realname) values ('member_test_02', 'member_test_02@example.com', '123456', 'member_test_02')",
+		"insert into project (name, owner_id) values ('member_test_02', 1)",
+		"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_02', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
+		"update project set owner_id = (select user_id from harbor_user where username = 'member_test_02') where name = 'member_test_02'",
+		"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select user_id from harbor_user where username = 'member_test_02'), 'u', 1)",
+		"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select id from user_group where group_name = 'test_group_02'), 'g', 1)",
 	}
 
 	clearSqls := []string{
-		"delete from project where name='member_test_01'",
-		"delete from harbor_user where username='member_test_01' or username='pm_sample'",
+		"delete from project where name='member_test_01' or name='member_test_02'",
+		"delete from harbor_user where username='member_test_01' or username='member_test_02' or username='pm_sample'",
 		"delete from user_group",
 		"delete from project_member",
 	}
@ -285,6 +292,39 @@ func TestGetProjectMember(t *testing.T) {
 	}
 
 }
+
+func TestGetTotalOfProjectMembers(t *testing.T) {
+	currentProject, _ := dao.GetProjectByName("member_test_02")
+
+	type args struct {
+		projectID int64
+		roles     []int
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    int64
+		wantErr bool
+	}{
+		{"Get total of project admin", args{currentProject.ProjectID, []int{common.RoleProjectAdmin}}, 2, false},
+		{"Get total of master", args{currentProject.ProjectID, []int{common.RoleMaster}}, 0, false},
+		{"Get total of developer", args{currentProject.ProjectID, []int{common.RoleDeveloper}}, 0, false},
+		{"Get total of guest", args{currentProject.ProjectID, []int{common.RoleGuest}}, 0, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := GetTotalOfProjectMembers(tt.args.projectID, tt.args.roles...)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("GetTotalOfProjectMembers() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("GetTotalOfProjectMembers() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
 func PrepareGroupTest() {
 	initSqls := []string{
 		`insert into user_group (group_name, group_type, ldap_group_dn) values ('harbor_group_01', 1, 'cn=harbor_user,dc=example,dc=com')`,
@ -305,30 +345,3 @@ func PrepareGroupTest() {
 	}
 	dao.PrepareTestData(clearSqls, initSqls)
 }
-func TestGetRolesByGroup(t *testing.T) {
-	PrepareGroupTest()
-
-	project, err := dao.GetProjectByName("group_project")
-	if err != nil {
-		t.Errorf("Error occurred when GetProjectByName : %v", err)
-	}
-	type args struct {
-		projectID        int64
-		groupDNCondition string
-	}
-	tests := []struct {
-		name string
-		args args
-		want []int
-	}{
-		{"Query group with role", args{project.ProjectID, "'cn=harbor_user,dc=example,dc=com'"}, []int{2}},
-		{"Query group no role", args{project.ProjectID, "'cn=another_user,dc=example,dc=com'"}, []int{}},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := GetRolesByGroup(tt.args.projectID, tt.args.groupDNCondition); !dao.ArrayEqual(got, tt.want) {
-				t.Errorf("GetRolesByGroup() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}

122
src/common/dao/project_blob.go
Normal file
@ -0,0 +1,122 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dao

import (
	"fmt"
	"time"

	"github.com/goharbor/harbor/src/common/models"
)

// AddBlobToProject associates a blob with a project, creating the record if it does not exist
func AddBlobToProject(blobID, projectID int64) (int64, error) {
	pb := &models.ProjectBlob{
		BlobID:       blobID,
		ProjectID:    projectID,
		CreationTime: time.Now(),
	}

	_, id, err := GetOrmer().ReadOrCreate(pb, "blob_id", "project_id")
	return id, err
}

// AddBlobsToProject associates multiple blobs with a project in one insert
func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) {
	if len(blobs) == 0 {
		return 0, nil
	}

	now := time.Now()

	var projectBlobs []*models.ProjectBlob
	for _, blob := range blobs {
		projectBlobs = append(projectBlobs, &models.ProjectBlob{
			BlobID:       blob.ID,
			ProjectID:    projectID,
			CreationTime: now,
		})
	}

	return GetOrmer().InsertMulti(len(projectBlobs), projectBlobs)
}

// RemoveBlobsFromProject removes the association between the project and the given blobs
func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error {
	var blobIDs []interface{}
	for _, blob := range blobs {
		blobIDs = append(blobIDs, blob.ID)
	}

	if len(blobIDs) == 0 {
		return nil
	}

	// scope the delete to the given project so associations in other projects survive
	sql := fmt.Sprintf(`DELETE FROM project_blob WHERE project_id = ? AND blob_id IN (%s)`, ParamPlaceholderForIn(len(blobIDs)))

	_, err := GetOrmer().Raw(sql, projectID, blobIDs).Exec()
	return err
}

// HasBlobInProject returns whether a blob with the given digest belongs to the project
func HasBlobInProject(projectID int64, digest string) (bool, error) {
	sql := `SELECT COUNT(*) FROM project_blob JOIN blob ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?`

	var count int64
	if err := GetOrmer().Raw(sql, projectID, digest).QueryRow(&count); err != nil {
		return false, err
	}

	return count > 0, nil
}

// GetBlobsNotInProject returns the blobs with the given digests that are not associated with the project
func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blob, error) {
	if len(blobDigests) == 0 {
		return nil, nil
	}

	sql := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) AND digest IN (%s)",
		ParamPlaceholderForIn(len(blobDigests)))

	params := []interface{}{projectID}
	for _, digest := range blobDigests {
		params = append(params, digest)
	}

	var blobs []*models.Blob
	if _, err := GetOrmer().Raw(sql, params...).QueryRows(&blobs); err != nil {
		return nil, err
	}

	return blobs, nil
}

// CountSizeOfProject sums the sizes of all blobs associated with the project
func CountSizeOfProject(pid int64) (int64, error) {
	var blobs []models.Blob

	_, err := GetOrmer().Raw(`SELECT bb.id, bb.digest, bb.content_type, bb.size, bb.creation_time FROM project_blob pb LEFT JOIN blob bb ON pb.blob_id = bb.id WHERE pb.project_id = ? `, pid).QueryRows(&blobs)
	if err != nil {
		return 0, err
	}

	var size int64
	for _, blob := range blobs {
		size += blob.Size
	}

	return size, err
}
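
A lifecycle sketch mirroring the tests below; the digests, sizes and project name are illustrative, and a registered database is assumed:

package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/dao"
	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	// Register two blobs, attach them to a fresh project, then sum its storage.
	id1, err := dao.AddBlob(&models.Blob{Digest: "sketch_blob1", Size: 101})
	if err != nil {
		panic(err)
	}
	id2, err := dao.AddBlob(&models.Blob{Digest: "sketch_blob2", Size: 202})
	if err != nil {
		panic(err)
	}

	pid, err := dao.AddProject(models.Project{Name: "sketch_project", OwnerID: 1})
	if err != nil {
		panic(err)
	}

	if _, err := dao.AddBlobToProject(id1, pid); err != nil {
		panic(err)
	}
	if _, err := dao.AddBlobToProject(id2, pid); err != nil {
		panic(err)
	}

	size, err := dao.CountSizeOfProject(pid)
	if err != nil {
		panic(err)
	}
	fmt.Println(size) // 303, the sum of the two blob sizes
}
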
68
src/common/dao/project_blob_test.go
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
// Copyright Project Harbor Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package dao
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/goharbor/harbor/src/common/models"
|
||||||
|
"github.com/goharbor/harbor/src/common/utils"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHasBlobInProject(t *testing.T) {
	_, blob, err := GetOrCreateBlob(&models.Blob{
		Digest: digest.FromString(utils.GenerateRandomString()).String(),
		Size:   100,
	})
	require.Nil(t, err)

	_, err = AddBlobToProject(blob.ID, 1)
	require.Nil(t, err)

	has, err := HasBlobInProject(1, blob.Digest)
	require.Nil(t, err)
	assert.True(t, has)
}
func TestCountSizeOfProject(t *testing.T) {
	id1, err := AddBlob(&models.Blob{
		Digest: "CountSizeOfProject_blob1",
		Size:   101,
	})
	require.Nil(t, err)

	id2, err := AddBlob(&models.Blob{
		Digest: "CountSizeOfProject_blob2",
		Size:   202,
	})
	require.Nil(t, err)

	pid1, err := AddProject(models.Project{
		Name:    "CountSizeOfProject_project1",
		OwnerID: 1,
	})
	require.Nil(t, err)

	_, err = AddBlobToProject(id1, pid1)
	require.Nil(t, err)
	_, err = AddBlobToProject(id2, pid1)
	require.Nil(t, err)

	pSize, err := CountSizeOfProject(pid1)
	require.Nil(t, err)
	assert.Equal(t, int64(303), pSize)
}
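These tests run against a real database rather than mocks, so they assume a prepared Harbor schema with seed data; in particular, project ID 1 is assumed to be the default library project. Under those assumptions they can be run selectively with:

go test ./src/common/dao/ -run 'TestHasBlobInProject|TestCountSizeOfProject'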
@@ -118,124 +118,6 @@ func Test_projectQueryConditions(t *testing.T) {
	}
}
func TestGetGroupProjects(t *testing.T) {
	prepareGroupTest()
	query := &models.ProjectQueryParam{Member: &models.MemberQuery{Name: "sample_group"}}
	type args struct {
		groupDNCondition string
		query            *models.ProjectQueryParam
	}
	tests := []struct {
		name     string
		args     args
		wantSize int
		wantErr  bool
	}{
		{"Verify correct sql", args{groupDNCondition: "'cn=harbor_user,dc=example,dc=com'", query: query}, 1, false},
		{"Verify missed sql", args{groupDNCondition: "'cn=another_user,dc=example,dc=com'", query: query}, 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetGroupProjects(tt.args.groupDNCondition, tt.args.query)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if len(got) != tt.wantSize {
				t.Errorf("GetGroupProjects() = %v, want %v", got, tt.wantSize)
			}
		})
	}
}
func prepareGroupTest() {
	initSqls := []string{
		`insert into user_group (group_name, group_type, ldap_group_dn) values ('harbor_group_01', 1, 'cn=harbor_user,dc=example,dc=com')`,
		`insert into harbor_user (username, email, password, realname) values ('sample01', 'sample01@example.com', 'harbor12345', 'sample01')`,
		`insert into project (name, owner_id) values ('group_project', 1)`,
		`insert into project (name, owner_id) values ('group_project_private', 1)`,
		`insert into project_metadata (project_id, name, value) values ((select project_id from project where name = 'group_project'), 'public', 'false')`,
		`insert into project_metadata (project_id, name, value) values ((select project_id from project where name = 'group_project_private'), 'public', 'false')`,
		`insert into project_member (project_id, entity_id, entity_type, role) values ((select project_id from project where name = 'group_project'), (select id from user_group where group_name = 'harbor_group_01'),'g', 2)`,
	}

	clearSqls := []string{
		`delete from project_metadata where project_id in (select project_id from project where name in ('group_project', 'group_project_private'))`,
		`delete from project where name in ('group_project', 'group_project_private')`,
		`delete from project_member where project_id in (select project_id from project where name in ('group_project', 'group_project_private'))`,
		`delete from user_group where group_name = 'harbor_group_01'`,
		`delete from harbor_user where username = 'sample01'`,
	}
	PrepareTestData(clearSqls, initSqls)
}
func TestGetTotalGroupProjects(t *testing.T) {
	prepareGroupTest()
	query := &models.ProjectQueryParam{Member: &models.MemberQuery{Name: "sample_group"}}
	type args struct {
		groupDNCondition string
		query            *models.ProjectQueryParam
	}
	tests := []struct {
		name    string
		args    args
		want    int
		wantErr bool
	}{
		{"Verify correct sql", args{groupDNCondition: "'cn=harbor_user,dc=example,dc=com'", query: query}, 1, false},
		{"Verify missed sql", args{groupDNCondition: "'cn=another_user,dc=example,dc=com'", query: query}, 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetTotalGroupProjects(tt.args.groupDNCondition, tt.args.query)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetTotalGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("GetTotalGroupProjects() = %v, want %v", got, tt.want)
			}
		})
	}
}
func TestGetRolesByLDAPGroup(t *testing.T) {
	prepareGroupTest()
	project, err := GetProjectByName("group_project")
	if err != nil {
		t.Errorf("Error occurred when getting project by name: %v", err)
	}
	privateProject, err := GetProjectByName("group_project_private")
	if err != nil {
		t.Errorf("Error occurred when getting project by name: %v", err)
	}
	type args struct {
		projectID        int64
		groupDNCondition string
	}
	tests := []struct {
		name     string
		args     args
		wantSize int
		wantErr  bool
	}{
		{"Check normal", args{project.ProjectID, "'cn=harbor_user,dc=example,dc=com'"}, 1, false},
		{"Check non exist", args{privateProject.ProjectID, "'cn=not_harbor_user,dc=example,dc=com'"}, 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetRolesByLDAPGroup(tt.args.projectID, tt.args.groupDNCondition)
			if (err != nil) != tt.wantErr {
				t.Errorf("TestGetRolesByLDAPGroup() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if len(got) != tt.wantSize {
				t.Errorf("TestGetRolesByLDAPGroup() = %v, want %v", len(got), tt.wantSize)
			}
		})
	}
}
func TestProjetExistsByName(t *testing.T) {
	name := "project_exist_by_name_test"
	exist := ProjectExistsByName(name)