Merge remote-tracking branch 'upstream/master' into 190324_sync

Signed-off-by: Wenkai Yin <yinw@vmware.com>

commit 49cf50adb1
@@ -32,7 +32,7 @@ env:
- HARBOR_ADMIN: admin
- HARBOR_ADMIN_PASSWD: Harbor12345
- CORE_SECRET: tempString
- KEY_PATH: "/data/secretkey"
- KEY_PATH: "/data/secret/keys/secretkey"
- REDIS_HOST: localhost
- REG_VERSION: v2.7.1
- UI_BUILDER_VERSION: 1.6.0
Makefile (111 lines changed)
@@ -65,6 +65,7 @@ SHELL := /bin/bash
BUILDPATH=$(CURDIR)
MAKEPATH=$(BUILDPATH)/make
MAKEDEVPATH=$(MAKEPATH)/dev
MAKE_PREPARE_PATH=$(MAKEPATH)/photon/prepare
SRCPATH=./src
TOOLSPATH=$(BUILDPATH)/tools
CORE_PATH=$(BUILDPATH)/src/core
@@ -94,6 +95,8 @@ UIVERSIONTAG=dev
VERSIONFILEPATH=$(CURDIR)
VERSIONFILENAME=UIVERSION

PREPARE_VERSION_NAME=versions

#versions
REGISTRYVERSION=v2.7.1
NGINXVERSION=$(VERSIONTAG)
@@ -107,6 +110,14 @@ NOTARYMIGRATEVERSION=v3.5.4
# version of chartmuseum
CHARTMUSEUMVERSION=v0.8.1

define VERSIONS_FOR_PREPARE
VERSION_TAG: $(VERSIONTAG)
REGISTRY_VERSION: $(REGISTRYVERSION)
NOTARY_VERSION: $(NOTARYVERSION)
CLAIR_VERSION: $(CLAIRVERSION)
CHARTMUSEUM_VERSION: $(CHARTMUSEUMVERSION)
endef

# docker parameters
DOCKERCMD=$(shell which docker)
DOCKERBUILD=$(DOCKERCMD) build
@@ -150,12 +161,13 @@ MIGRATEPATCHBINARYNAME=migrate-patch

# configfile
CONFIGPATH=$(MAKEPATH)
CONFIGFILE=harbor.cfg
INSIDE_CONFIGPATH=/compose_location
CONFIGFILE=harbor.yml

# prepare parameters
PREPAREPATH=$(TOOLSPATH)
PREPARECMD=prepare
PREPARECMD_PARA=--conf $(CONFIGPATH)/$(CONFIGFILE)
PREPARECMD_PARA=--conf $(INSIDE_CONFIGPATH)/$(CONFIGFILE)
ifeq ($(NOTARYFLAG), true)
PREPARECMD_PARA+= --with-notary
endif
@@ -174,6 +186,7 @@ MAKEFILEPATH_PHOTON=$(MAKEPATH)/photon
DOCKERFILEPATH_COMMON=$(MAKEPATH)/common

# docker image name
DOCKER_IMAGE_NAME_PREPARE=goharbor/prepare
DOCKERIMAGENAME_PORTAL=goharbor/harbor-portal
DOCKERIMAGENAME_CORE=goharbor/harbor-core
DOCKERIMAGENAME_JOBSERVICE=goharbor/harbor-jobservice
@@ -208,7 +221,8 @@ REGISTRYUSER=user
REGISTRYPASSWORD=default

# cmds
DOCKERSAVE_PARA= $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
DOCKERSAVE_PARA=$(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG) \
    $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
    $(DOCKERIMAGENAME_CORE):$(VERSIONTAG) \
    $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
    $(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
@@ -218,30 +232,24 @@ DOCKERSAVE_PARA= $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
    goharbor/nginx-photon:$(NGINXVERSION) goharbor/registry-photon:$(REGISTRYVERSION)-$(VERSIONTAG)

PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \
    $(HARBORPKG)/common/templates $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
    $(HARBORPKG)/prepare \
    $(HARBORPKG)/LICENSE $(HARBORPKG)/install.sh \
    $(HARBORPKG)/harbor.cfg $(HARBORPKG)/$(DOCKERCOMPOSEFILENAME)

    $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
    $(HARBORPKG)/prepare \
    $(HARBORPKG)/LICENSE $(HARBORPKG)/install.sh \
    $(HARBORPKG)/harbor.yml

PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
    $(HARBORPKG)/common/templates $(HARBORPKG)/prepare \
    $(HARBORPKG)/LICENSE \
    $(HARBORPKG)/install.sh $(HARBORPKG)/$(DOCKERCOMPOSEFILENAME) \
    $(HARBORPKG)/harbor.cfg

    $(HARBORPKG)/prepare \
    $(HARBORPKG)/LICENSE \
    $(HARBORPKG)/install.sh \
    $(HARBORPKG)/harbor.yml

DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)

ifeq ($(NOTARYFLAG), true)
DOCKERSAVE_PARA+= goharbor/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) goharbor/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
endif
ifeq ($(CLAIRFLAG), true)
DOCKERSAVE_PARA+= goharbor/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
endif
ifeq ($(MIGRATORFLAG), true)
DOCKERSAVE_PARA+= goharbor/harbor-migrator:$(MIGRATORVERSION)
@@ -249,14 +257,15 @@ endif
# append chartmuseum parameters if set
ifeq ($(CHARTFLAG), true)
DOCKERSAVE_PARA+= $(DOCKERIMAGENAME_CHART_SERVER):$(CHARTMUSEUMVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
endif

export VERSIONS_FOR_PREPARE
ui_version:
    @printf $(UIVERSIONTAG) > $(VERSIONFILEPATH)/$(VERSIONFILENAME);

versions_prepare:
    @echo "$$VERSIONS_FOR_PREPARE" > $(MAKE_PREPARE_PATH)/$(PREPARE_VERSION_NAME)

check_environment:
    @$(MAKEPATH)/$(CHECKENVCMD)

@@ -282,9 +291,13 @@ compile_notary_migrate_patch:
    @$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_MIGRATEPATCH) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -o $(GOBUILDMAKEPATH_NOTARY)/$(MIGRATEPATCHBINARYNAME)
    @echo "Done."

compile:check_environment compile_core compile_jobservice compile_registryctl compile_notary_migrate_patch

prepare:
compile: check_environment versions_prepare compile_core compile_jobservice compile_registryctl compile_notary_migrate_patch

update_prepare_version:
    @echo "substitute the prepare version tag in prepare file..."
    $(SEDCMD) -i -e 's/goharbor\/prepare:.*[[:space:]]\+/goharbor\/prepare:$(VERSIONTAG) /' $(MAKEPATH)/prepare ;

prepare: update_prepare_version
    @echo "preparing..."
    @$(MAKEPATH)/$(PREPARECMD) $(PREPARECMD_PARA)

@@ -295,41 +308,9 @@ build:
    -e BUILDBIN=$(BUILDBIN) -e REDISVERSION=$(REDISVERSION) -e MIGRATORVERSION=$(MIGRATORVERSION) \
    -e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER)

modify_composefile: modify_composefile_notary modify_composefile_clair modify_composefile_chartmuseum
    @echo "preparing docker-compose file..."
    @cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i -e 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i -e 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i -e 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i -e 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i -e 's/__redis_version__/$(REDISVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
install: compile ui_version build prepare start

modify_composefile_notary:
    @echo "preparing docker-compose notary file..."
    @cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
    @$(SEDCMD) -i -e 's/__notary_version__/$(NOTARYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)

modify_composefile_clair:
    @echo "preparing docker-compose clair file..."
    @cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
    @$(SEDCMD) -i -e 's/__postgresql_version__/$(CLAIRDBVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
    @$(SEDCMD) -i -e 's/__clair_version__/$(CLAIRVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
modify_composefile_chartmuseum:
    @echo "preparing docker-compose chartmuseum file..."
    @cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
    @$(SEDCMD) -i -e 's/__chartmuseum_version__/$(CHARTMUSEUMVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)

modify_sourcefiles:
    @echo "change mode of source files."
    @chmod 600 $(MAKEPATH)/common/templates/notary/notary-signer.key
    @chmod 600 $(MAKEPATH)/common/templates/notary/notary-signer.crt
    @chmod 600 $(MAKEPATH)/common/templates/notary/notary-signer-ca.crt
    @chmod 600 $(MAKEPATH)/common/templates/core/private_key.pem
    @chmod 600 $(MAKEPATH)/common/templates/registry/root.crt

install: compile ui_version build modify_sourcefiles prepare modify_composefile start

package_online: modify_composefile
package_online: prepare
    @echo "packing online package ..."
    @cp -r make $(HARBORPKG)
    @if [ -n "$(REGISTRYSERVER)" ] ; then \
@@ -337,12 +318,13 @@ package_online: modify_composefile
    $(HARBORPKG)/docker-compose.yml ; \
    fi
    @cp LICENSE $(HARBORPKG)/LICENSE


    @$(TARCMD) $(PACKAGE_ONLINE_PARA)
    @rm -rf $(HARBORPKG)
    @echo "Done."

package_offline: compile ui_version build modify_sourcefiles modify_composefile

package_offline: update_prepare_version compile ui_version build

    @echo "packing offline package ..."
    @cp -r make $(HARBORPKG)
    @cp LICENSE $(HARBORPKG)/LICENSE
@@ -400,6 +382,11 @@ govet:

pushimage:
    @echo "pushing harbor images ..."
    @$(DOCKERTAG) $(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG)
    @$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG) \
        $(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
    @$(DOCKERRMIMAGE) $(REGISTRYSERVER)$(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG)

    @$(DOCKERTAG) $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG)
    @$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
        $(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
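
Note: DOCKERCOMPOSE_LIST above starts from the base compose file and appends one -f flag per enabled component, so the compose invocation the Makefile ultimately issues looks roughly like the sketch below (hedged; assumes NOTARYFLAG and CLAIRFLAG are true and the default $(MAKEPATH) of ./make):

    docker-compose -f make/docker-compose.yml \
                   -f make/docker-compose.notary.yml \
                   -f make/docker-compose.clair.yml up -d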
@@ -1405,33 +1405,6 @@ paths:
          $ref: '#/responses/UnsupportedMediaType'
        '503':
          description: Harbor is not deployed with Clair.
  /repositories/scanAll:
    post:
      summary: Scan all images of the registry.
      description: |
        The server will launch different jobs to scan each image on the registry, so this is equivalent to calling the API to scan the images one by one in the background; there is no way to track the overall status of the "scan all" action. Only the system admin has permission to call this API.
      parameters:
        - name: project_id
          in: query
          type: integer
          description: When this param is set, only the images under the project identified by project_id will be scanned.
      tags:
        - Products
      responses:
        '202':
          description: The action is successfully taken in the background. If some images fail to scan, it will only be reflected in the job status.
        '401':
          description: User needs to login or call the API with correct credentials.
        '403':
          description: User doesn't have permission to perform the action.
        '409':
          description: There is a "scanall" job in progress, so the request cannot be served.
        '415':
          $ref: '#/responses/UnsupportedMediaType'
        '500':
          description: Failed to initiate the action.
        '503':
          description: Harbor is not deployed with Clair.
  '/repositories/{repo_name}/tags/{tag}/vulnerability/details':
    get:
      summary: Get vulnerability details of the image.
@@ -2986,8 +2959,6 @@ paths:
        '200':
          description: Get gc results successfully.
          schema:
            type: array
            items:
              $ref: '#/definitions/GCResult'
        '401':
          description: User need to log in first.
@@ -3033,9 +3004,7 @@ paths:
        '200':
          description: Get gc's schedule.
          schema:
            type: array
            items:
              $ref: '#/definitions/GCSchedule'
            $ref: '#/definitions/AdminJobSchedule'
        '401':
          description: User need to log in first.
        '403':
@@ -3051,21 +3020,19 @@ paths:
          in: body
          required: true
          schema:
            $ref: '#/definitions/GCSchedule'
          description: Updates of gs's schedule.
            $ref: '#/definitions/AdminJobSchedule'
          description: Updates of gc's schedule.
      tags:
        - Products
      responses:
        '200':
          description: Updated gc's schedule successfully.
        '400':
          description: Bad params.
          description: Invalid schedule type.
        '401':
          description: User need to log in first.
        '403':
          description: User does not have permission of admin role.
        '404':
          description: GC schedule does not exist.
        '500':
          description: Unexpected internal errors.
    post:
@@ -3077,23 +3044,92 @@ paths:
          in: body
          required: true
          schema:
            $ref: '#/definitions/GCSchedule'
          description: Updates of gs's schedule.
            $ref: '#/definitions/AdminJobSchedule'
          description: Updates of gc's schedule.
      tags:
        - Products
      responses:
        '200':
          description: Updated replication's target successfully.
          description: GC schedule successfully.
        '400':
          description: The target is associated with policy which is enabled.
          description: Invalid schedule type.
        '401':
          description: User need to log in first.
        '403':
          description: User does not have permission of admin role.
        '404':
          description: Target ID does not exist.
        '409':
          description: There is a "gc" job in progress, so the request cannot be served.
        '500':
          description: Unexpected internal errors.
  /system/scanAll/schedule:
    get:
      summary: Get scan_all's schedule.
      description: This endpoint is for getting a schedule for the scan all job, which scans all of the images in Harbor.
      tags:
        - Products
      responses:
        '200':
          description: Get a schedule for the scan all job, which scans all of the images in Harbor.
          schema:
            $ref: '#/definitions/AdminJobSchedule'
        '401':
          description: User need to log in first.
        '403':
          description: Only admin has this authority.
        '500':
          description: Unexpected internal errors.
    put:
      summary: Update scan all's schedule.
      description: |
        This endpoint is for updating the schedule of the scan all job, which scans all of the images in Harbor.
      parameters:
        - name: schedule
          in: body
          required: true
          schema:
            $ref: '#/definitions/AdminJobSchedule'
          description: Updates the schedule of the scan all job, which scans all of the images in Harbor.
      tags:
        - Products
      responses:
        '200':
          description: Updated scan_all's schedule successfully.
        '400':
          description: Invalid schedule type.
        '401':
          description: User need to log in first.
        '403':
          description: User does not have permission of admin role.
        '500':
          description: Unexpected internal errors.
    post:
      summary: Create a schedule or a manual trigger for the scan all job.
      description: |
        This endpoint is for creating a schedule or a manual trigger for the scan all job, which scans all of the images in Harbor.
      parameters:
        - name: schedule
          in: body
          required: true
          schema:
            $ref: '#/definitions/AdminJobSchedule'
          description: Create a schedule or a manual trigger for the scan all job.
      tags:
        - Products
      responses:
        '200':
          description: Updated scan_all's schedule successfully.
        '400':
          description: Invalid schedule type.
        '401':
          description: User need to log in first.
        '403':
          description: User does not have permission of admin role.
        '409':
          description: There is a "scanall" job in progress, so the request cannot be served.
        '500':
          description: Unexpected internal errors.
        '503':
          description: Harbor is not deployed with Clair.
  /configurations:
    get:
      summary: Get system configurations.
@@ -5099,7 +5135,7 @@ definitions:
        type: string
        description: the job kind of gc job.
      schedule:
        $ref: '#/definitions/GCScheduleSchedule'
        $ref: '#/definitions/AdminJobScheduleObj'
      job_status:
        type: string
        description: the status of gc job.
@@ -5111,13 +5147,13 @@ definitions:
        description: the creation time of gc job.
      update_time:
        type: string
        description: the update time of gc job.
  GCSchedule:
        description: the update time of gc job.
  AdminJobSchedule:
    type: object
    properties:
      schedule:
        $ref: '#/definitions/GCScheduleSchedule'
  GCScheduleSchedule:
        $ref: '#/definitions/AdminJobScheduleObj'
  AdminJobScheduleObj:
    type: object
    properties:
      type:
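
Note: all three /system/scanAll/schedule methods take an AdminJobSchedule body whose schedule field is an AdminJobScheduleObj carrying a type. A hedged curl sketch of the POST (the field set of AdminJobScheduleObj beyond "type" is not visible in this hunk, and "Manual" as a trigger type is an assumption):

    curl -u admin:Harbor12345 -X POST \
         -H "Content-Type: application/json" \
         -d '{"schedule": {"type": "Manual"}}' \
         "https://reg.mydomain.com/api/system/scanAll/schedule"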
@@ -1,3 +0,0 @@
http_proxy=$http_proxy
https_proxy=$https_proxy
no_proxy=$no_proxy
@@ -1 +0,0 @@
POSTGRES_PASSWORD=$password
@@ -1,68 +0,0 @@
PORT=8080
LOG_LEVEL=info
EXT_ENDPOINT=$public_url
AUTH_MODE=$auth_mode
SELF_REGISTRATION=$self_registration
LDAP_URL=$ldap_url
LDAP_SEARCH_DN=$ldap_searchdn
LDAP_SEARCH_PWD=$ldap_search_pwd
LDAP_BASE_DN=$ldap_basedn
LDAP_FILTER=$ldap_filter
LDAP_UID=$ldap_uid
LDAP_SCOPE=$ldap_scope
LDAP_TIMEOUT=$ldap_timeout
LDAP_VERIFY_CERT=$ldap_verify_cert
DATABASE_TYPE=postgresql
POSTGRESQL_HOST=$db_host
POSTGRESQL_PORT=$db_port
POSTGRESQL_USERNAME=$db_user
POSTGRESQL_PASSWORD=$db_password
POSTGRESQL_DATABASE=registry
POSTGRESQL_SSLMODE=disable
LDAP_GROUP_BASEDN=$ldap_group_basedn
LDAP_GROUP_FILTER=$ldap_group_filter
LDAP_GROUP_GID=$ldap_group_gid
LDAP_GROUP_SCOPE=$ldap_group_scope
REGISTRY_URL=$registry_url
TOKEN_SERVICE_URL=$token_service_url
EMAIL_HOST=$email_host
EMAIL_PORT=$email_port
EMAIL_USR=$email_usr
EMAIL_PWD=$email_pwd
EMAIL_SSL=$email_ssl
EMAIL_FROM=$email_from
EMAIL_IDENTITY=$email_identity
EMAIL_INSECURE=$email_insecure
HARBOR_ADMIN_PASSWORD=$harbor_admin_password
PROJECT_CREATION_RESTRICTION=$project_creation_restriction
MAX_JOB_WORKERS=$max_job_workers
CORE_SECRET=$core_secret
JOBSERVICE_SECRET=$jobservice_secret
TOKEN_EXPIRATION=$token_expiration
CFG_EXPIRATION=5
ADMIRAL_URL=$admiral_url
WITH_NOTARY=$with_notary
WITH_CLAIR=$with_clair
CLAIR_DB_PASSWORD=$clair_db_password
CLAIR_DB_HOST=$clair_db_host
CLAIR_DB_PORT=$clair_db_port
CLAIR_DB_USERNAME=$clair_db_username
CLAIR_DB=$clair_db
CLAIR_DB_SSLMODE=disable
RESET=$reload_config
UAA_ENDPOINT=$uaa_endpoint
UAA_CLIENTID=$uaa_clientid
UAA_CLIENTSECRET=$uaa_clientsecret
UAA_VERIFY_CERT=$uaa_verify_cert
CORE_URL=$core_url
JOBSERVICE_URL=$jobservice_url
CLAIR_URL=$clair_url
NOTARY_URL=$notary_url
REGISTRY_STORAGE_PROVIDER_NAME=$storage_provider_name
READ_ONLY=false
SKIP_RELOAD_ENV_PATTERN=$skip_reload_env_pattern
RELOAD_KEY=$reload_key
CHART_REPOSITORY_URL=$chart_repository_url
LDAP_GROUP_ADMIN_DN=$ldap_group_admin_dn
REGISTRY_CONTROLLER_URL=$registry_controller_url
WITH_CHARTMUSEUM=$with_chartmuseum
@@ -1,10 +0,0 @@
LOG_LEVEL=info
CONFIG_PATH=/etc/core/app.conf
CORE_SECRET=$core_secret
JOBSERVICE_SECRET=$jobservice_secret
UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem
_REDIS_URL=$redis_host:$redis_port,100,$redis_password
SYNC_REGISTRY=false
CHART_CACHE_DRIVER=$chart_cache_driver
_REDIS_URL_REG=$redis_url_reg

@@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEAtpMvyv153iSmwm6TrFpUOzsIGBEDbGtOOEZMEm08D8IC2n1G
d6/XOZ5FxPAD6gIpE0EAcMojY5O0Hl4CDoyV3e/iKcBqFOgYtpogNtan7yT5J8gw
KsPbU/8nBkK75GOq56nfvq4t9GVAclIDtHbuvmlh6O2n+fxtR0M9LbuotbSBdXYU
hzXqiSsMclBvLyIk/z327VP5l0nUNOzPuKIwQjuxYKDkvq1oGy98oVlE6wl0ldh2
ZYZLGAYbVhqBVUT1Un/PYqi9Nofa2RI5n1WOkUJQp87vb+PUPFhVOdvH/oAzV6/b
9dzyhA5paDM06lj2gsg9hQWxCgbFh1x39c6pSI8hmVe6x2d4tAtSyOm3Qwz+zO2l
bPDvkY8Svh5nxUYObrNreoO8wHr8MC6TGUQLnUt/RfdVKe5fYPFl6VYqJP/L3LDn
Xj771nFq6PKiYbhBwJw3TM49gpKNS/Of70TP2m7nVlyuyMdE5T1j3xyXNkixXqqn
JuSMqX/3Bmm0On9KEbemwn7KRYF/bqc50+RcGUdKNcOkN6vuMVZei4GbxALnVqac
s+/UQAiQP4212UO7iZFwMaCNJ3r/b4GOlyalI1yEA4odoZov7k5zVOzHu8O6QmCj
3R5TVOudpGiUh+lumRRpNqxDgjngLljvaWU6ttyIbjnAwCjnJoppZM2lkRkCAwEA
AQKCAgAvsvCPlf2a3fR7Y6xNISRUfS22K+u7DaXX6fXB8qv4afWY45Xfex89vG35
78L2Bi55C0h0LztjrpkmPeVHq88TtrJduhl88M5UFpxH93jUb9JwZErBQX4xyb2G
UzUHjEqAT89W3+a9rR5TP74cDd59/MZJtp1mIF7keVqochi3sDsKVxkx4hIuWALe
csk5hTApRyUWCBRzRCSe1yfF0wnMpA/JcP+SGXfTcmqbNNlelo/Q/kaga59+3UmT
C0Wy41s8fIvP+MnGT2QLxkkrqYyfwrWTweqoTtuKEIHjpdnwUcoYJKfQ6jKp8aH0
STyP5UIyFOKNuFjyh6ZfoPbuT1nGW+YKlUnK4hQ9N/GE0oMoecTaHTbqM+psQvbj
6+CG/1ukA5ZTQyogNyuOApArFBQ+RRmVudPKA3JYygIhwctuB2oItsVEOEZMELCn
g2aVFAVXGfGRDXvpa8oxs3Pc6RJEp/3tON6+w7cMCx0lwN/Jk2Ie6RgTzUycT3k6
MoTQJRoO6/ZHcx3hTut/CfnrWiltyAUZOsefLuLg+Pwf9GHhOycLRI6gHfgSwdIV
S77UbbELWdscVr1EoPIasUm1uYWBBcFRTturRW+GHJ8TZX+mcWSBcWwBhp15LjEl
tJf+9U6lWMOSB2LvT+vFmR0M9q56fo7UeKFIR7mo7/GpiVu5AQKCAQEA6Qs7G9mw
N/JZOSeQO6xIQakC+sKApPyXO58fa7WQzri+l2UrLNp0DEQfZCujqDgwys6OOzR/
xg8ZKQWVoad08Ind3ZwoJgnLn6QLENOcE6PpWxA/JjnVGP4JrXCYR98cP0sf9jEI
xkR1qT50GbeqU3RDFliI4kGRvbZ8cekzuWppfQcjstSBPdvuxqAcUVmTnTw83nvD
FmBbhlLiEgI3iKtJ97UB7480ivnWnOuusduk7FO4jF3hkrOa+YRidinTCi8JBo0Y
jx4Ci3Y5x6nvwkXhKzXapd7YmPNisUc5xA7/a+W71cyC0IKUwRc/8pYWLL3R3CpR
YiV8gf6gwzOckQKCAQEAyI9CSNoAQH4zpS8B9PF8zILqEEuun8m1f5JB3hQnfWzm
7uz/zg6I0TkcCE0AJVSKPHQm1V9+TRbF9+DiOWHEYYzPmK8h63SIufaWxZPqai4E
PUj6eQWykBUVJ96n6/AW0JHRZ+WrJ5RXBqCLuY7NP6wDhORrCJjBwaGMohNpbKPS
H3QewsoxCh+CEXKdKyy+/yU/f4E89PlHapkW1/bDJ5u7puSD+KvmiDDIXSBncdOO
uFT8n+XH5IwgjdXFSDim15rQ8jD2l2xLcwKboTpx5GeRl8oB1VGm0fUbBn1dvGPG
4WfHGyrp9VNZtP160WoHr+vRVPqvHNkoeAlCfEwQCQKCAQBN1dtzLN0HgqE8TrOE
ysEDdTCykj4nXNoiJr522hi4gsndhQPLolb6NdKKQW0S5Vmekyi8K4e1nhtYMS5N
5MFRCasZtmtOcR0af87WWucZRDjPmniNCunaxBZ1YFLsRl+H4E6Xir8UgY8O7PYY
FNkFsKIrl3x4nU/RHl8oKKyG9Dyxbq4Er6dPAuMYYiezIAkGjjUCVjHNindnQM2T
GDx2IEe/PSydV6ZD+LguhyU88FCAQmI0N7L8rZJIXmgIcWW0VAterceTHYHaFK2t
u1uB9pcDOKSDnA+Z3kiLT2/CxQOYhQ2clgbnH4YRi/Nm0awsW2X5dATklAKm5GXL
bLSRAoIBAQClaNnPQdTBXBR2IN3pSZ2XAkXPKMwdxvtk+phOc6raHA4eceLL7FrU
y9gd1HvRTfcwws8gXcDKDYU62gNaNhMELWEt2QsNqS/2x7Qzwbms1sTyUpUZaSSL
BohLOKyfv4ThgdIGcXoGi6Z2tcRnRqpq4BCK8uR/05TBgN5+8amaS0ZKYLfaCW4G
nlPk1fVgHWhtAChtnYZLuKg494fKmB7+NMfAbmmVlxjrq+gkPkxyqXvk9Vrg+V8y
VIuozu0Fkouv+GRpyw4ldtCHS1hV0eEK8ow2dwmqCMygDxm58X10mYn2b2PcOTl5
9sNerUw1GNC8O66K+rGgBk4FKgXmg8kZAoIBABBcuisK250fXAfjAWXGqIMs2+Di
vqAdT041SNZEOJSGNFsLJbhd/3TtCLf29PN/YXtnvBmC37rqryTsqjSbx/YT2Jbr
Bk3jOr9JVbmcoSubXl8d/uzf7IGs91qaCgBwPZHgeH+kK13FCLexz+U9zYMZ78fF
/yO82CpoekT+rcl1jzYn43b6gIklHABQU1uCD6MMyMhJ9Op2WmbDk3X+py359jMc
+Cr2zfzdHAIVff2dOV3OL+ZHEWbwtnn3htKUdOmjoTJrciFx0xNZJS5Q7QYHMONj
yPqbajyhopiN01aBQpCSGF1F1uRpWeIjTrAZPbrwLl9YSYXz0AT05QeFEFk=
-----END RSA PRIVATE KEY-----
@@ -1 +0,0 @@
POSTGRES_PASSWORD=$db_password
@@ -1,3 +0,0 @@
CORE_SECRET=$core_secret
JOBSERVICE_SECRET=$jobservice_secret
CORE_URL=$core_url
@@ -1,28 +0,0 @@
{
  "server": {
    "http_addr": ":4443"
  },
  "trust_service": {
    "type": "remote",
    "hostname": "notarysigner",
    "port": "7899",
    "tls_ca_file": "./notary-signer-ca.crt",
    "key_algorithm": "ecdsa"
  },
  "logging": {
    "level": "debug"
  },
  "storage": {
    "backend": "mysql",
    "db_url": "server@tcp(mysql:3306)/notaryserver?parseTime=True"
  },
  "auth": {
    "type": "token",
    "options": {
      "realm": "$token_endpoint/service/token",
      "service": "harbor-notary",
      "issuer": "harbor-token-issuer",
      "rootcertbundle": "/etc/notary/root.crt"
    }
  }
}
@@ -1,15 +0,0 @@
{
  "server": {
    "grpc_addr": ":7899",
    "tls_cert_file": "./notary-signer.crt",
    "tls_key_file": "./notary-signer.key"
  },
  "logging": {
    "level": "debug"
  },
  "storage": {
    "backend": "mysql",
    "db_url": "signer@tcp(mysql:3306)/notarysigner?parseTime=True",
    "default_alias": "defaultalias"
  }
}
@@ -1,35 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIGBzCCA++gAwIBAgIJAKB8CNqCxhr7MA0GCSqGSIb3DQEBCwUAMIGZMQswCQYD
VQQGEwJDTjEOMAwGA1UECAwFU3RhdGUxCzAJBgNVBAcMAkNOMRUwEwYDVQQKDAxv
cmdhbml6YXRpb24xHDAaBgNVBAsME29yZ2FuaXphdGlvbmFsIHVuaXQxFDASBgNV
BAMMC2V4YW1wbGUuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu
Y29tMB4XDTE2MDUxNjAyNDY1NVoXDTI2MDUxNDAyNDY1NVowgZkxCzAJBgNVBAYT
AkNOMQ4wDAYDVQQIDAVTdGF0ZTELMAkGA1UEBwwCQ04xFTATBgNVBAoMDG9yZ2Fu
aXphdGlvbjEcMBoGA1UECwwTb3JnYW5pemF0aW9uYWwgdW5pdDEUMBIGA1UEAwwL
ZXhhbXBsZS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20w
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2ky/K/XneJKbCbpOsWlQ7
OwgYEQNsa044RkwSbTwPwgLafUZ3r9c5nkXE8APqAikTQQBwyiNjk7QeXgIOjJXd
7+IpwGoU6Bi2miA21qfvJPknyDAqw9tT/ycGQrvkY6rnqd++ri30ZUByUgO0du6+
aWHo7af5/G1HQz0tu6i1tIF1dhSHNeqJKwxyUG8vIiT/PfbtU/mXSdQ07M+4ojBC
O7FgoOS+rWgbL3yhWUTrCXSV2HZlhksYBhtWGoFVRPVSf89iqL02h9rZEjmfVY6R
QlCnzu9v49Q8WFU528f+gDNXr9v13PKEDmloMzTqWPaCyD2FBbEKBsWHXHf1zqlI
jyGZV7rHZ3i0C1LI6bdDDP7M7aVs8O+RjxK+HmfFRg5us2t6g7zAevwwLpMZRAud
S39F91Up7l9g8WXpViok/8vcsOdePvvWcWro8qJhuEHAnDdMzj2Cko1L85/vRM/a
budWXK7Ix0TlPWPfHJc2SLFeqqcm5Iypf/cGabQ6f0oRt6bCfspFgX9upznT5FwZ
R0o1w6Q3q+4xVl6LgZvEAudWppyz79RACJA/jbXZQ7uJkXAxoI0nev9vgY6XJqUj
XIQDih2hmi/uTnNU7Me7w7pCYKPdHlNU652kaJSH6W6ZFGk2rEOCOeAuWO9pZTq2
3IhuOcDAKOcmimlkzaWRGQIDAQABo1AwTjAdBgNVHQ4EFgQUPJF++WMsv1OJvf7F
oCew37JTnfQwHwYDVR0jBBgwFoAUPJF++WMsv1OJvf7FoCew37JTnfQwDAYDVR0T
BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAb5LvqukMxWd5Zajbh3orfYsXmhWn
UWiwG176+bd3b5xMlG9iLd4vQ11lTZoIhFOfprRQzbizQ8BzR2JBQckpLcy+5hyA
D3M9vLL37OwA0wT6kxFnd6LtlFaH5gG++huw2ts2PDXFz0jqw+0YE/R8ov2+YdaZ
aPSEMunmAuEY1TbYWzz4u6PxycxhQzDQ34ZmJZ34Elvw1NYMfPMGTKp34PsxIcgT
ao5jqb9RMU6JAumfXrOvXRjjl573vX2hgMZzEU6OF2/+uyg95chn6nO1GUQrT2+F
/1xIqfHfFCm8+jujSDgqfBtGI+2C7No+Dq8LEyEINZe6wSQ81+ryt5jy5SZmAsnj
V4OsSIwlpR5fLUwrFStVoUWHEKl1DflkYki/cAC1TL0Om+ldJ219kcOnaXDNaq66
3I75BvRY7/88MYLl4Fgt7sn05Mn3uNPrCrci8d0R1tlXIcwMdCowIHeZdWHX43f7
NsVk/7VSOxJ343csgaQc+3WxEFK0tBxGO6GP+Xj0XmdVGLhalVBsEhPjnmx+Yyrn
oMsTA1Yrs88C8ItQn7zuO/30eKNGTnby0gptHiS6sa/c3O083Mpi8y33GPVZDvBl
l9PfSZT8LG7SvpjsdgdNZlyFvTY4vsB+Vd5Howh7gXYPVXdCs4k7HMyo7zvzliZS
ekCw9NGLoNqQqnA=
-----END CERTIFICATE-----
@@ -1,3 +0,0 @@
CORE_SECRET=$core_secret
JOBSERVICE_SECRET=$jobservice_secret

@@ -1,42 +0,0 @@
version: '2'
services:
  core:
    networks:
      harbor-chartmuseum:
        aliases:
          - harbor-core
  redis:
    networks:
      harbor-chartmuseum:
        aliases:
          - redis
  chartmuseum:
    container_name: chartmuseum
    image: goharbor/chartmuseum-photon:__chartmuseum_version__
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    networks:
      - harbor-chartmuseum
    dns_search: .
    depends_on:
      - redis
    volumes:
      - /data/chart_storage:/chart_storage:z
      - ./common/config/chartserver:/etc/chartserver:z
      - ./common/config/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "chartmuseum"
    env_file:
      ./common/config/chartserver/env
networks:
  harbor-chartmuseum:
    external: false
@@ -1,47 +0,0 @@
version: '2'
services:
  core:
    networks:
      harbor-clair:
        aliases:
          - harbor-core
  jobservice:
    networks:
      - harbor-clair
  registry:
    networks:
      - harbor-clair
  postgresql:
    networks:
      harbor-clair:
        aliases:
          - harbor-db
  clair:
    networks:
      - harbor-clair
    container_name: clair
    image: goharbor/clair-photon:__clair_version__
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    cpu_quota: 50000
    dns_search: .
    depends_on:
      - postgresql
    volumes:
      - ./common/config/clair/config.yaml:/etc/clair/config.yaml:z
      - ./common/config/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "clair"
    env_file:
      ./common/config/clair/clair_env
networks:
  harbor-clair:
    external: false
@@ -1,69 +0,0 @@
version: '2'
services:
  core:
    networks:
      - harbor-notary
  proxy:
    networks:
      - harbor-notary
  postgresql:
    networks:
      harbor-notary:
        aliases:
          - harbor-db
  notary-server:
    image: goharbor/notary-server-photon:__notary_version__
    container_name: notary-server
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
    networks:
      - notary-sig
      - harbor-notary
    dns_search: .
    volumes:
      - ./common/config/notary:/etc/notary:z
    env_file:
      - ./common/config/notary/server_env
    depends_on:
      - postgresql
      - notary-signer
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "notary-server"
  notary-signer:
    image: goharbor/notary-signer-photon:__notary_version__
    container_name: notary-signer
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
    networks:
      harbor-notary:
      notary-sig:
        aliases:
          - notarysigner
    dns_search: .
    volumes:
      - ./common/config/notary:/etc/notary:z
    env_file:
      - ./common/config/notary/signer_env
    depends_on:
      - postgresql
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "notary-signer"
networks:
  harbor-notary:
    external: false
  notary-sig:
    external: false
make/harbor.cfg (204 lines deleted)
@@ -1,204 +0,0 @@
## Configuration file of Harbor

#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version = 1.7.0
#The IP address or hostname to access admin UI and registry service.
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
#DO NOT comment out this line, modify the value of "hostname" directly, or the installation will fail.
hostname = reg.mydomain.com

#The protocol for accessing the UI and token/notification service, by default it is http.
#It can be set to https if ssl is enabled on nginx.
ui_url_protocol = http

#Maximum number of job workers in job service
max_job_workers = 10

#Determine whether or not to generate certificate for the registry's token.
#If the value is on, the prepare script creates new root cert and private key
#for generating token to access the registry. If the value is off the default key/cert will be used.
#This flag also controls the creation of the notary signer's cert.
customize_crt = on

#The path of cert and key files for nginx, they are applied only when the protocol is set to https
ssl_cert = /data/cert/server.crt
ssl_cert_key = /data/cert/server.key

#The path of secretkey storage
secretkey_path = /data

#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone
admiral_url = NA

#Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
log_rotate_count = 50
#Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
#If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
#are all valid.
log_rotate_size = 200M

#Config http proxy for Clair, e.g. http://my.proxy.com:3128
#Clair doesn't need to connect to harbor internal components via http proxy.
http_proxy =
https_proxy =
no_proxy = 127.0.0.1,localhost,core,registry

#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES
#only take effect in the first boot, the subsequent changes of these properties
#should be performed on web ui

#************************BEGIN INITIAL PROPERTIES************************

#Email account settings for sending out password resetting emails.

#Email server uses the given username and password to authenticate on TLS connections to host and act as identity.
#Identity left blank to act as username.
email_identity =

email_server = smtp.mydomain.com
email_server_port = 25
email_username = sample_admin@mydomain.com
email_password = abc
email_from = admin <sample_admin@mydomain.com>
email_ssl = false
email_insecure = false

##The initial password of Harbor admin, only works for the first time when Harbor starts.
#It has no effect after the first launch of Harbor.
#Change the admin password from UI after launching Harbor.
harbor_admin_password = Harbor12345

##By default the auth mode is db_auth, i.e. the credentials are stored in a local database.
#Set it to ldap_auth if you want to verify a user's credentials against an LDAP server.
auth_mode = db_auth

#The url for an ldap endpoint.
ldap_url = ldaps://ldap.mydomain.com

#A user's DN who has the permission to search the LDAP/AD server.
#If your LDAP/AD server does not support anonymous search, you should configure this DN and ldap_search_pwd.
#ldap_searchdn = uid=searchuser,ou=people,dc=mydomain,dc=com

#the password of the ldap_searchdn
#ldap_search_pwd = password

#The base DN from which to look up a user in LDAP/AD
ldap_basedn = ou=people,dc=mydomain,dc=com

#Search filter for LDAP/AD, make sure the syntax of the filter is correct.
#ldap_filter = (objectClass=person)

# The attribute used in a search to match a user, it could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD
ldap_uid = uid

#the scope to search for users, 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
ldap_scope = 2

#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds.
ldap_timeout = 5

#Verify certificate from LDAP server
ldap_verify_cert = true

#The base dn from which to lookup a group in LDAP/AD
ldap_group_basedn = ou=group,dc=mydomain,dc=com

#filter to search LDAP/AD group
ldap_group_filter = objectclass=group

#The attribute used to name a LDAP/AD group, it could be cn, name
ldap_group_gid = cn

#The scope to search for ldap groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
ldap_group_scope = 2

#Turn on or off the self-registration feature
self_registration = on

#The expiration time (in minute) of token created by token service, default is 30 minutes
token_expiration = 30

#The flag to control what users have permission to create projects
#The default value "everyone" allows everyone to create a project.
#Set to "adminonly" so that only admin user can create project.
project_creation_restriction = everyone

#************************END INITIAL PROPERTIES************************

#######Harbor DB configuration section#######

#The address of the Harbor database. Only need to change when using external db.
db_host = postgresql

#The password for the root user of Harbor DB. Change this before any production use.
db_password = root123

#The port of Harbor database host
db_port = 5432

#The user name of Harbor database
db_user = postgres

##### End of Harbor DB configuration#######

##########Redis server configuration.############

#Redis connection address
redis_host = redis

#Redis connection port
redis_port = 6379

#Redis connection password
redis_password =

#Redis connection db index
#db_index 1,2,3 is for registry, jobservice and chartmuseum.
#db_index 0 is for UI, it's unchangeable
redis_db_index = 1,2,3

########## End of Redis server configuration ############

##########Clair DB configuration############

#Clair DB host address. Only change it when using an external DB.
clair_db_host = postgresql
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
clair_db_password = root123
#Clair DB connect port
clair_db_port = 5432
#Clair DB username
clair_db_username = postgres
#Clair default database
clair_db = postgres

#The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
clair_updaters_interval = 12

##########End of Clair DB configuration############

#The following attributes only need to be set when auth mode is uaa_auth
uaa_endpoint = uaa.mydomain.org
uaa_clientid = id
uaa_clientsecret = secret
uaa_verify_cert = true
uaa_ca_cert = /path/to/ca.pem


### Harbor Storage settings ###
#Please be aware that the following storage settings will be applied to both docker registry and helm chart repository.
#registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
registry_storage_provider_name = filesystem
#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
#To avoid duplicated configurations, both docker registry and chart repository follow the same storage configuration specifications of docker registry.
#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
registry_storage_provider_config =
#registry_custom_ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
#of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
registry_custom_ca_bundle =

#If reload_config=true, all settings which present in harbor.cfg take effect after prepare and restart harbor, it overwrites existing settings.
#reload_config=true
#Regular expression to match skipped environment variables
#skip_reload_env_pattern=(^EMAIL.*)|(^LDAP.*)
make/harbor.yml (new file, 114 lines)
@@ -0,0 +1,114 @@
## Configuration file of Harbor

#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 1.7.0
#The IP address or hostname to access admin UI and registry service.
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
#DO NOT comment out this line, modify the value of "hostname" directly, or the installation will fail.
hostname: reg.mydomain.com

#The protocol for accessing the UI and token/notification service, by default it is http.
#It can be set to https if ssl is enabled on nginx.
ui_url_protocol: https

#Maximum number of job workers in job service
max_job_workers: 10

# The default data volume
data_volume: /data

#The path of cert and key files for nginx, they are applied only when the protocol is set to https
ssl_cert: /data/cert/server.crt
ssl_cert_key: /data/cert/server.key

#The path of secretkey storage
secretkey_path: /data

#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone
admiral_url: NA

# Log configurations
log:
  # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
  rotate_count: 50
  # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
  # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
  # are all valid.
  rotate_size: 200M
  # The directory that stores log files
  location: /var/log/harbor

#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES
#only take effect in the first boot, the subsequent changes of these properties
#should be performed on web ui

##The initial password of Harbor admin, only works for the first time when Harbor starts.
#It has no effect after the first launch of Harbor.
#Change the admin password from UI after launching Harbor.
harbor_admin_password: Harbor12345

## Harbor DB configuration
database:
  #The address of the Harbor database. Only need to change when using external db.
  host: postgresql
  #The port of Harbor database host
  port: 5432
  #The user name of Harbor database
  username: postgres
  #The password for the root user of Harbor DB. Change this before any production use.
  password: root123


# Redis server configuration
redis:
  # Redis connection address
  host: redis
  # Redis connection port
  port: 6379
  # Redis connection password
  password:
  # Redis connection db index
  # db_index 1,2,3 is for registry, jobservice and chartmuseum.
  # db_index 0 is for UI, it's unchangeable
  db_index: 1,2,3


# Clair DB configuration
clair:
  # Clair DB host address. Only change it when using an external DB.
  db_host: postgresql
  # The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
  # Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
  db_password: root123
  # Clair DB connect port
  db_port: 5432
  # Clair DB username
  db_username: postgres
  # Clair default database
  db: postgres
  # The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
  updaters_interval: 12

  #Config http proxy for Clair, e.g. http://my.proxy.com:3128
  #Clair doesn't need to connect to harbor internal components via http proxy.
  http_proxy:
  https_proxy:
  no_proxy: 127.0.0.1,localhost,core,registry

# Harbor Storage settings
storage:
  #Please be aware that the following storage settings will be applied to both docker registry and helm chart repository.
  #registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
  registry_storage_provider_name: filesystem
  #registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
  #To avoid duplicated configurations, both docker registry and chart repository follow the same storage configuration specifications of docker registry.
  #Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
  registry_storage_provider_config:
  #registry_custom_ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
  #of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
  registry_custom_ca_bundle:

#If reload_config=true, all settings which present in harbor.yml take effect after prepare and restart harbor, it overwrites existing settings.
#reload_config=true
#Regular expression to match skipped environment variables
#skip_reload_env_pattern: (^EMAIL.*)|(^LDAP.*)
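
Note: the flat `key = value` entries of harbor.cfg become nested maps here; for example, db_host/db_port/db_user/db_password collapse into the database: block. A quick syntax check before running prepare (a sketch; assumes python3 with PyYAML available, which the prepare image installs from the Pipfile below):

    python3 -c 'import yaml; yaml.safe_load(open("make/harbor.yml")); print("harbor.yml parses")'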
@@ -49,8 +49,8 @@ note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n"
set -e
set +o noglob

usage=$'Please set hostname and other necessary attributes in harbor.cfg first. DO NOT use localhost or 127.0.0.1 for hostname, because Harbor needs to be accessed by external clients.
Please set --with-notary if needs enable Notary in Harbor, and set ui_url_protocol/ssl_cert/ssl_cert_key in harbor.cfg because notary must run under https.
usage=$'Please set hostname and other necessary attributes in harbor.yml first. DO NOT use localhost or 127.0.0.1 for hostname, because Harbor needs to be accessed by external clients.
Please set --with-notary if needs enable Notary in Harbor, and set ui_url_protocol/ssl_cert/ssl_cert_key in harbor.yml because notary must run under https.
Please set --with-clair if needs enable Clair in Harbor
Please set --with-chartmuseum if needs enable Chartmuseum in Harbor'
item=0
@@ -83,8 +83,8 @@ done
workdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $workdir

# The hostname in harbor.cfg has not been modified
if grep '^[[:blank:]]*hostname = reg.mydomain.com' &> /dev/null harbor.cfg
# The hostname in harbor.yml has not been modified
if grep '^[[:blank:]]*hostname: reg.mydomain.com' &> /dev/null harbor.yml
then
    warn "$usage"
    exit 1
@@ -160,7 +160,7 @@ echo ""
h2 "[Step $item]: preparing environment ...";  let item+=1
if [ -n "$host" ]
then
    sed "s/^hostname = .*/hostname = $host/g" -i ./harbor.cfg
    sed "s/^hostname: .*/hostname: $host/g" -i ./harbor.yml
fi
prepare_para=
if [ $with_notary ]
@@ -179,40 +179,25 @@ fi
./prepare $prepare_para
echo ""

h2 "[Step $item]: checking existing instance of Harbor ..."; let item+=1
docker_compose_list='-f docker-compose.yml'
if [ $with_notary ]
then
    docker_compose_list="${docker_compose_list} -f docker-compose.notary.yml"
fi
if [ $with_clair ]
then
    docker_compose_list="${docker_compose_list} -f docker-compose.clair.yml"
fi
if [ $with_chartmuseum ]
then
    docker_compose_list="${docker_compose_list} -f docker-compose.chartmuseum.yml"
fi

if [ -n "$(docker-compose $docker_compose_list ps -q)" ]
if [ -n "$(docker-compose ps -q)" ]
then
    note "stopping existing Harbor instance ..."
    docker-compose $docker_compose_list down -v
    docker-compose down -v
fi
echo ""

h2 "[Step $item]: starting Harbor ..."
docker-compose $docker_compose_list up -d
docker-compose up -d

protocol=http
hostname=reg.mydomain.com

if [[ $(cat ./harbor.cfg) =~ ui_url_protocol[[:blank:]]*=[[:blank:]]*(https?) ]]
if [[ $(cat ./harbor.yml) =~ ui_url_protocol:[[:blank:]]*(https?) ]]
then
    protocol=${BASH_REMATCH[1]}
fi

if [[ $(grep '^[[:blank:]]*hostname[[:blank:]]*=' ./harbor.cfg) =~ hostname[[:blank:]]*=[[:blank:]]*(.*) ]]
if [[ $(grep '^[[:blank:]]*hostname:' ./harbor.yml) =~ hostname:[[:blank:]]*(.*) ]]
then
    hostname=${BASH_REMATCH[1]}
fi
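
Note: with the per-component compose-file list gone, prepare now renders a single docker-compose.yml and install.sh can call plain docker-compose. The component switches survive as installer flags, e.g. (usage sketch):

    sudo ./install.sh --with-clair --with-chartmuseum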
@@ -32,6 +32,10 @@ JOBSERVICEBINARYNAME=harbor_jobservice
# photon dockerfile
DOCKERFILEPATH=$(MAKEPATH)/photon

DOCKERFILEPATH_PREPARE=$(DOCKERFILEPATH)/prepare
DOCKERFILENAME_PREPARE=Dockerfile
DOCKERIMAGENAME_PREPARE=goharbor/prepare

DOCKERFILEPATH_PORTAL=$(DOCKERFILEPATH)/portal
DOCKERFILENAME_PORTAL=Dockerfile
DOCKERIMAGENAME_PORTAL=goharbor/harbor-portal
@@ -93,6 +97,11 @@ CHART_SERVER_CODE_BASE=github.com/helm/chartmuseum
CHART_SERVER_MAIN_PATH=cmd/chartmuseum
CHART_SERVER_BIN_NAME=chartm

_build_prepare:
    @echo "building prepare container for photon..."
    @$(DOCKERBUILD) -f $(DOCKERFILEPATH_PREPARE)/$(DOCKERFILENAME_PREPARE) -t $(DOCKERIMAGENAME_PREPARE):$(VERSIONTAG) .
    @echo "Done."

_build_db:
    @echo "building db container for photon..."
    @$(DOCKERBUILD) -f $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB) -t $(DOCKERIMAGENAME_DB):$(VERSIONTAG) .
@@ -200,7 +209,7 @@ define _get_binary
    $(WGET) --timeout 30 --no-check-certificate $1 -O $2
endef

build: _build_db _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_notary _build_clair _build_redis _build_migrator _build_chart_server
build: _build_prepare _build_db _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_notary _build_clair _build_redis _build_migrator _build_chart_server

cleanimage:
    @echo "cleaning image for photon..."

@@ -4,7 +4,7 @@ set +e

usage(){
    echo "Usage: builder <golang image:version> <code path> <code release tag> <main.go path> <binary name>"
    echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.7.1 cmd/chartmuseum chartm"
    echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.8.1 cmd/chartmuseum chartm"
    exit 1
}
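
Note: the new _build_prepare target can be exercised on its own; a hedged sketch, assuming it is invoked the same way as the other photon image targets and that the variables it needs (MAKEPATH, VERSIONTAG) are supplied:

    make -f make/photon/Makefile _build_prepare MAKEPATH=make VERSIONTAG=dev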
make/photon/prepare/Dockerfile (new file, 18 lines)
@@ -0,0 +1,18 @@
FROM photon:2.0

ENV LANG en_US.UTF-8

WORKDIR /usr/src/app

RUN mkdir -p /harbor_make

RUN tdnf install -y python3 \
    && tdnf install -y python3-pip
RUN pip3 install pipenv==2018.11.26

COPY make/photon/prepare /usr/src/app
RUN set -ex && pipenv install --deploy --system

ENTRYPOINT [ "python3", "main.py" ]

VOLUME ["/harbor_make"]
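
Note: the image bundles the prepare scripts and declares /harbor_make as a volume; the make/prepare wrapper (patched above via sed to pin goharbor/prepare:$(VERSIONTAG)) is what actually runs it. A hedged sketch of a bare invocation, with the mount points assumed rather than taken from this diff:

    docker run --rm \
        -v "$(pwd)/make:/compose_location" \
        -v "$(pwd)/make:/harbor_make" \
        goharbor/prepare:dev --conf /compose_location/harbor.yml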
make/photon/prepare/Pipfile (new file, 15 lines)
@@ -0,0 +1,15 @@
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"

[packages]
pyyaml = "*"
click = "*"
"jinja2" = "*"

[dev-packages]
pylint = "*"

[requires]
python_version = "3.6"
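
Note: the Pipfile.lock below pins these dependencies, and `pipenv install --deploy --system` in the Dockerfile fails the build if the lock file is stale. To refresh it after editing the Pipfile:

    pipenv lock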
make/photon/prepare/Pipfile.lock (generated, new file, 192 lines)
@@ -0,0 +1,192 @@
{
    "_meta": {
        "hash": {
            "sha256": "8950f4066b83c5eb792e0f828de1530b2a61d19e45531660adfc8e06a02f2e71"
        },
        "pipfile-spec": 6,
        "requires": {
            "python_version": "3.6"
        },
        "sources": [
            {
                "name": "pypi",
                "url": "https://pypi.org/simple",
                "verify_ssl": true
            }
        ]
    },
    "default": {
        "click": {
            "hashes": [
                "sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13",
                "sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7"
            ],
            "index": "pypi",
            "version": "==7.0"
        },
        "jinja2": {
            "hashes": [
                "sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd",
                "sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4"
            ],
            "index": "pypi",
            "version": "==2.10"
        },
        "markupsafe": {
            "hashes": [
                "sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432",
                "sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b",
                "sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9",
                "sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af",
                "sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834",
                "sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd",
                "sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d",
                "sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7",
                "sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b",
                "sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3",
                "sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c",
                "sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2",
                "sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7",
                "sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36",
                "sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1",
                "sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e",
                "sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1",
                "sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c",
                "sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856",
                "sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550",
                "sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492",
                "sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672",
                "sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401",
                "sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6",
                "sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6",
                "sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c",
                "sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd",
                "sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1"
            ],
            "version": "==1.1.0"
        },
        "pyyaml": {
            "hashes": [
                "sha256:3d7da3009c0f3e783b2c873687652d83b1bbfd5c88e9813fb7e5b03c0dd3108b",
                "sha256:3ef3092145e9b70e3ddd2c7ad59bdd0252a94dfe3949721633e41344de00a6bf",
                "sha256:40c71b8e076d0550b2e6380bada1f1cd1017b882f7e16f09a65be98e017f211a",
                "sha256:558dd60b890ba8fd982e05941927a3911dc409a63dcb8b634feaa0cda69330d3",
                "sha256:a7c28b45d9f99102fa092bb213aa12e0aaf9a6a1f5e395d36166639c1f96c3a1",
                "sha256:aa7dd4a6a427aed7df6fb7f08a580d68d9b118d90310374716ae90b710280af1",
                "sha256:bc558586e6045763782014934bfaf39d48b8ae85a2713117d16c39864085c613",
                "sha256:d46d7982b62e0729ad0175a9bc7e10a566fc07b224d2c79fafb5e032727eaa04",
                "sha256:d5eef459e30b09f5a098b9cea68bebfeb268697f78d647bd255a085371ac7f3f",
                "sha256:e01d3203230e1786cd91ccfdc8f8454c8069c91bee3962ad93b87a4b2860f537",
                "sha256:e170a9e6fcfd19021dd29845af83bb79236068bf5fd4df3327c1be18182b2531"
            ],
            "index": "pypi",
            "version": "==3.13"
        }
    },
    "develop": {
        "astroid": {
            "hashes": [
                "sha256:35b032003d6a863f5dcd7ec11abd5cd5893428beaa31ab164982403bcb311f22",
                "sha256:6a5d668d7dc69110de01cdf7aeec69a679ef486862a0850cc0fd5571505b6b7e"
            ],
            "version": "==2.1.0"
        },
        "isort": {
            "hashes": [
                "sha256:1153601da39a25b14ddc54955dbbacbb6b2d19135386699e2ad58517953b34af",
                "sha256:b9c40e9750f3d77e6e4d441d8b0266cf555e7cdabdcff33c4fd06366ca761ef8",
                "sha256:ec9ef8f4a9bc6f71eec99e1806bfa2de401650d996c59330782b89a5555c1497"
            ],
            "version": "==4.3.4"
        },
        "lazy-object-proxy": {
            "hashes": [
                "sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33",
                "sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39",
                "sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019",
                "sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088",
                "sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b",
                "sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e",
                "sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6",
                "sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b",
                "sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5",
                "sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff",
                "sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd",
                "sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7",
                "sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff",
                "sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d",
                "sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2",
                "sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35",
                "sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4",
                "sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514",
                "sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252",
|
||||
"sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109",
|
||||
"sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f",
|
||||
"sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c",
|
||||
"sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92",
|
||||
"sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577",
|
||||
"sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d",
|
||||
"sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d",
|
||||
"sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f",
|
||||
"sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a",
|
||||
"sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b"
|
||||
],
|
||||
"version": "==1.3.1"
|
||||
},
|
||||
"mccabe": {
|
||||
"hashes": [
|
||||
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
|
||||
"sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
|
||||
],
|
||||
"version": "==0.6.1"
|
||||
},
|
||||
"pylint": {
|
||||
"hashes": [
|
||||
"sha256:689de29ae747642ab230c6d37be2b969bf75663176658851f456619aacf27492",
|
||||
"sha256:771467c434d0d9f081741fec1d64dfb011ed26e65e12a28fe06ca2f61c4d556c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.2.2"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c",
|
||||
"sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"
|
||||
],
|
||||
"version": "==1.12.0"
|
||||
},
|
||||
"typed-ast": {
|
||||
"hashes": [
|
||||
"sha256:0555eca1671ebe09eb5f2176723826f6f44cca5060502fea259de9b0e893ab53",
|
||||
"sha256:0ca96128ea66163aea13911c9b4b661cb345eb729a20be15c034271360fc7474",
|
||||
"sha256:16ccd06d614cf81b96de42a37679af12526ea25a208bce3da2d9226f44563868",
|
||||
"sha256:1e21ae7b49a3f744958ffad1737dfbdb43e1137503ccc59f4e32c4ac33b0bd1c",
|
||||
"sha256:37670c6fd857b5eb68aa5d193e14098354783b5138de482afa401cc2644f5a7f",
|
||||
"sha256:46d84c8e3806619ece595aaf4f37743083f9454c9ea68a517f1daa05126daf1d",
|
||||
"sha256:5b972bbb3819ece283a67358103cc6671da3646397b06e7acea558444daf54b2",
|
||||
"sha256:6306ffa64922a7b58ee2e8d6f207813460ca5a90213b4a400c2e730375049246",
|
||||
"sha256:6cb25dc95078931ecbd6cbcc4178d1b8ae8f2b513ae9c3bd0b7f81c2191db4c6",
|
||||
"sha256:7e19d439fee23620dea6468d85bfe529b873dace39b7e5b0c82c7099681f8a22",
|
||||
"sha256:7f5cd83af6b3ca9757e1127d852f497d11c7b09b4716c355acfbebf783d028da",
|
||||
"sha256:81e885a713e06faeef37223a5b1167615db87f947ecc73f815b9d1bbd6b585be",
|
||||
"sha256:94af325c9fe354019a29f9016277c547ad5d8a2d98a02806f27a7436b2da6735",
|
||||
"sha256:b1e5445c6075f509d5764b84ce641a1535748801253b97f3b7ea9d948a22853a",
|
||||
"sha256:cb061a959fec9a514d243831c514b51ccb940b58a5ce572a4e209810f2507dcf",
|
||||
"sha256:cc8d0b703d573cbabe0d51c9d68ab68df42a81409e4ed6af45a04a95484b96a5",
|
||||
"sha256:da0afa955865920edb146926455ec49da20965389982f91e926389666f5cf86a",
|
||||
"sha256:dc76738331d61818ce0b90647aedde17bbba3d3f9e969d83c1d9087b4f978862",
|
||||
"sha256:e7ec9a1445d27dbd0446568035f7106fa899a36f55e52ade28020f7b3845180d",
|
||||
"sha256:f741ba03feb480061ab91a465d1a3ed2d40b52822ada5b4017770dfcb88f839f",
|
||||
"sha256:fe800a58547dd424cd286b7270b967b5b3316b993d86453ede184a17b5a6b17d"
|
||||
],
|
||||
"markers": "python_version < '3.7' and implementation_name == 'cpython'",
|
||||
"version": "==1.1.1"
|
||||
},
|
||||
"wrapt": {
|
||||
"hashes": [
|
||||
"sha256:e03f19f64d81d0a3099518ca26b04550026f131eced2e76ced7b85c6b8d32128"
|
||||
],
|
||||
"version": "==1.11.0"
|
||||
}
|
||||
}
|
||||
}
|
29 make/photon/prepare/g.py Normal file
@@ -0,0 +1,29 @@
import os
from pathlib import Path

## Const
DEFAULT_UID = 10000
DEFAULT_GID = 10000

## Global variable
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = '/config'

secret_dir = '/secret'
secret_key_dir = '/secret/keys'

old_private_key_pem_path = Path('/config/core/private_key.pem')
old_crt_path = Path('/config/registry/root.crt')

private_key_pem_path = Path('/secret/core/private_key.pem')
root_crt_path = Path('/secret/registry/root.crt')

config_file_path = '/compose_location/harbor.yml'
versions_file_path = Path('/usr/src/app/versions')

cert_dir = os.path.join(config_dir, "nginx", "cert")
core_cert_dir = os.path.join(config_dir, "core", "certificates")

registry_custom_ca_bundle_storage_path = Path('/secret/common/custom-ca-bundle.crt')
registry_custom_ca_bundle_storage_input_path = Path('/input/common/custom-ca-bundle.crt')
70 make/photon/prepare/main.py Normal file
@@ -0,0 +1,70 @@
# pylint: disable=no-value-for-parameter

import click

from utils.misc import delfile
from utils.configs import validate, parse_yaml_config
from utils.cert import prepare_ca, SSL_CERT_KEY_PATH, SSL_CERT_PATH, get_secret_key, copy_ssl_cert, copy_secret_keys
from utils.db import prepare_db
from utils.jobservice import prepare_job_service
from utils.registry import prepare_registry
from utils.registry_ctl import prepare_registry_ctl
from utils.core import prepare_core
from utils.notary import prepare_notary
from utils.log import prepare_log_configs
from utils.clair import prepare_clair
from utils.chart import prepare_chartmuseum
from utils.docker_compose import prepare_docker_compose
from utils.nginx import prepare_nginx, nginx_confd_dir
from g import (config_dir, config_file_path, private_key_pem_path, root_crt_path,
               registry_custom_ca_bundle_storage_path, registry_custom_ca_bundle_storage_input_path, secret_key_dir,
               old_private_key_pem_path, old_crt_path)

# Main function
@click.command()
@click.option('--conf', default=config_file_path, help="the path of the Harbor configuration file")
@click.option('--with-notary', is_flag=True, help="the Harbor instance is to be deployed with notary")
@click.option('--with-clair', is_flag=True, help="the Harbor instance is to be deployed with clair")
@click.option('--with-chartmuseum', is_flag=True, help="the Harbor instance is to be deployed with chart repository support")
def main(conf, with_notary, with_clair, with_chartmuseum):

    delfile(config_dir)
    config_dict = parse_yaml_config(conf)
    validate(config_dict, notary_mode=with_notary)

    prepare_log_configs(config_dict)
    prepare_nginx(config_dict)
    prepare_core(config_dict, with_notary=with_notary, with_clair=with_clair, with_chartmuseum=with_chartmuseum)
    prepare_registry(config_dict)
    prepare_registry_ctl(config_dict)
    prepare_db(config_dict)
    prepare_job_service(config_dict)

    copy_secret_keys()
    get_secret_key(secret_key_dir)

    if config_dict['protocol'] == 'https':
        copy_ssl_cert()

    # If a customized cert is enabled
    prepare_ca(
        private_key_pem_path=private_key_pem_path,
        root_crt_path=root_crt_path,
        old_private_key_pem_path=old_private_key_pem_path,
        old_crt_path=old_crt_path,
        registry_custom_ca_bundle_config=registry_custom_ca_bundle_storage_input_path,
        registry_custom_ca_bundle_storage_path=registry_custom_ca_bundle_storage_path)

    if with_notary:
        prepare_notary(config_dict, nginx_confd_dir, SSL_CERT_PATH, SSL_CERT_KEY_PATH)

    if with_clair:
        prepare_clair(config_dict)

    if with_chartmuseum:
        prepare_chartmuseum(config_dict)

    prepare_docker_compose(config_dict, with_clair, with_notary, with_chartmuseum)

if __name__ == '__main__':
    main()
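The entrypoint above wires the click options straight into the per-component prepare steps. A minimal sketch of driving it in-process with click's bundled test runner (the container normally runs it through the Dockerfile's ENTRYPOINT; the config path here is illustrative):

# sketch: exercise the CLI defined in main.py without spawning a process
from click.testing import CliRunner

from main import main

runner = CliRunner()
# equivalent to: python3 main.py --conf /compose_location/harbor.yml --with-clair
result = runner.invoke(main, ['--conf', '/compose_location/harbor.yml', '--with-clair'])
print(result.exit_code)   # 0 on success
print(result.output)      # whatever the prepare steps printed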
@@ -2,24 +2,24 @@
 PORT=9999
 
 # Only support redis now. If redis is setup, then enable cache
-CACHE=$cache_store
-CACHE_REDIS_ADDR=$cache_redis_addr
-CACHE_REDIS_PASSWORD=$cache_redis_password
-CACHE_REDIS_DB=$cache_redis_db_index
+CACHE={{cache_store}}
+CACHE_REDIS_ADDR={{cache_redis_addr}}
+CACHE_REDIS_PASSWORD={{cache_redis_password}}
+CACHE_REDIS_DB={{cache_redis_db_index}}
 
 # Credential for internal communication
 BASIC_AUTH_USER=chart_controller
-BASIC_AUTH_PASS=$core_secret
+BASIC_AUTH_PASS={{core_secret}}
 
 # Multiple tenants
 # Must be set with 1 to support project namespace
 DEPTH=1
 
 # Backend storage driver: e.g. "local", "amazon", "google" etc.
-STORAGE=$storage_driver
+STORAGE={{storage_driver}}
 
 # Storage driver settings
-$all_storage_driver_configs
+{{all_storage_driver_configs}}
 
 ## Settings with default values. Just put here for future changes
 DEBUG=false
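The hunk above is part of a wholesale migration of the prepare templates from Python string.Template placeholders ($var) to Jinja2 ({{var}}). A minimal sketch of rendering such an env template, with illustrative values; the repository's own wrapper for this is utils/jinja.render_jinja, used by the modules further below:

import jinja2

# excerpt of the new-style template body from the hunk above
template_text = (
    "CACHE={{cache_store}}\n"
    "CACHE_REDIS_ADDR={{cache_redis_addr}}\n"
)

# StrictUndefined makes a missing variable fail loudly instead of
# silently rendering as an empty string
env = jinja2.Environment(undefined=jinja2.StrictUndefined)
print(env.from_string(template_text).render(
    cache_store="redis",
    cache_redis_addr="redis:6379"))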
3 make/photon/prepare/templates/clair/clair_env.jinja Normal file
@@ -0,0 +1,3 @@
http_proxy={{clair_http_proxy}}
https_proxy={{clair_https_proxy}}
no_proxy={{clair_no_proxy}}
@@ -2,7 +2,7 @@ clair:
   database:
     type: pgsql
     options:
-      source: postgresql://$username:$password@$host:$port/$dbname?sslmode=disable
+      source: postgresql://{{username}}:{{password}}@{{host}}:{{port}}/{{dbname}}?sslmode=disable
 
   # Number of elements kept in the cache
   # Values unlikely to change (e.g. namespaces) are cached in order to prevent needless roundtrips to the database.
@@ -16,7 +16,7 @@ clair:
     # Deadline before an API request will respond with a 503
     timeout: 300s
   updater:
-    interval: ${interval}h
+    interval: {{interval}}h
 
   notifier:
     attempts: 3
1 make/photon/prepare/templates/clair/postgres_env.jinja Normal file
@@ -0,0 +1 @@
POSTGRES_PASSWORD={{password}}
67 make/photon/prepare/templates/core/config_env.jinja Normal file
@@ -0,0 +1,67 @@
PORT=8080
LOG_LEVEL=info
EXT_ENDPOINT={{public_url}}
SELF_REGISTRATION={{self_registration}}
LDAP_URL={{ldap_url}}
LDAP_SEARCH_DN={{ldap_searchdn}}
LDAP_SEARCH_PWD={{ldap_search_pwd}}
LDAP_BASE_DN={{ldap_basedn}}
LDAP_FILTER={{ldap_filter}}
LDAP_UID={{ldap_uid}}
LDAP_SCOPE={{ldap_scope}}
LDAP_TIMEOUT={{ldap_timeout}}
LDAP_VERIFY_CERT={{ldap_verify_cert}}
DATABASE_TYPE=postgresql
POSTGRESQL_HOST={{db_host}}
POSTGRESQL_PORT={{db_port}}
POSTGRESQL_USERNAME={{db_user}}
POSTGRESQL_PASSWORD={{db_password}}
POSTGRESQL_DATABASE=registry
POSTGRESQL_SSLMODE=disable
LDAP_GROUP_BASEDN={{ldap_group_basedn}}
LDAP_GROUP_FILTER={{ldap_group_filter}}
LDAP_GROUP_GID={{ldap_group_gid}}
LDAP_GROUP_SCOPE={{ldap_group_scope}}
REGISTRY_URL={{registry_url}}
TOKEN_SERVICE_URL={{token_service_url}}
EMAIL_HOST={{email_host}}
EMAIL_PORT={{email_port}}
EMAIL_USR={{email_usr}}
EMAIL_PWD={{email_pwd}}
EMAIL_SSL={{email_ssl}}
EMAIL_FROM={{email_from}}
EMAIL_IDENTITY={{email_identity}}
EMAIL_INSECURE={{email_insecure}}
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
PROJECT_CREATION_RESTRICTION={{project_creation_restriction}}
MAX_JOB_WORKERS={{max_job_workers}}
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
TOKEN_EXPIRATION={{token_expiration}}
CFG_EXPIRATION=5
ADMIRAL_URL={{admiral_url}}
WITH_NOTARY={{with_notary}}
WITH_CLAIR={{with_clair}}
CLAIR_DB_PASSWORD={{clair_db_password}}
CLAIR_DB_HOST={{clair_db_host}}
CLAIR_DB_PORT={{clair_db_port}}
CLAIR_DB_USERNAME={{clair_db_username}}
CLAIR_DB={{clair_db}}
CLAIR_DB_SSLMODE=disable
RESET={{reload_config}}
UAA_ENDPOINT={{uaa_endpoint}}
UAA_CLIENTID={{uaa_clientid}}
UAA_CLIENTSECRET={{uaa_clientsecret}}
UAA_VERIFY_CERT={{uaa_verify_cert}}
CORE_URL={{core_url}}
JOBSERVICE_URL={{jobservice_url}}
CLAIR_URL={{clair_url}}
NOTARY_URL={{notary_url}}
REGISTRY_STORAGE_PROVIDER_NAME={{storage_provider_name}}
READ_ONLY=false
SKIP_RELOAD_ENV_PATTERN={{skip_reload_env_pattern}}
RELOAD_KEY={{reload_key}}
CHART_REPOSITORY_URL={{chart_repository_url}}
LDAP_GROUP_ADMIN_DN={{ldap_group_admin_dn}}
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
WITH_CHARTMUSEUM={{with_chartmuseum}}
9 make/photon/prepare/templates/core/env.jinja Normal file
@@ -0,0 +1,9 @@
LOG_LEVEL=info
CONFIG_PATH=/etc/core/app.conf
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem
_REDIS_URL={{redis_host}}:{{redis_port}},100,{{redis_password}}
SYNC_REGISTRY=false
CHART_CACHE_DRIVER={{chart_cache_driver}}
_REDIS_URL_REG={{redis_url_reg}}
1 make/photon/prepare/templates/db/env.jinja Normal file
@@ -0,0 +1 @@
POSTGRES_PASSWORD={{db_password}}
@@ -0,0 +1,398 @@
version: '2'
services:
  log:
    image: goharbor/harbor-log:{{version}}
    container_name: harbor-log
    restart: always
    dns_search: .
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    volumes:
      - {{log_location}}/:/var/log/docker/:z
      - ./common/config/log/:/etc/logrotate.d/:z
    ports:
      - 127.0.0.1:1514:10514
    networks:
      - harbor
  registry:
    image: goharbor/registry-photon:{{reg_version}}
    container_name: registry
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - {{data_volume}}/registry:/storage:z
      - ./common/config/registry/:/etc/registry/:z
      - {{data_volume}}/secret/registry/root.crt:/etc/registry/root.crt:z
      {%if registry_custom_ca_bundle_storage_path %}
      - {{data_volume}}/secret/common/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
      {% endif %}
    networks:
      - harbor
      {% if with_clair %}
      - harbor-clair
      {% endif %}
    dns_search: .
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "registry"
  registryctl:
    image: goharbor/harbor-registryctl:{{version}}
    container_name: registryctl
    env_file:
      - ./common/config/registryctl/env
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - {{data_volume}}/registry:/storage:z
      - ./common/config/registry/:/etc/registry/:z
      - ./common/config/registryctl/config.yml:/etc/registryctl/config.yml:z
    networks:
      - harbor
    dns_search: .
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "registryctl"
  postgresql:
    image: goharbor/harbor-db:{{version}}
    container_name: harbor-db
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    volumes:
      - {{data_volume}}/database:/var/lib/postgresql/data:z
    networks:
      harbor:
      {% if with_notary %}
      harbor-notary:
        aliases:
          - harbor-db
      {% endif %}
      {% if with_clair %}
      harbor-clair:
        aliases:
          - harbor-db
      {% endif %}
    dns_search: .
    env_file:
      - ./common/config/db/env
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "postgresql"
  core:
    image: goharbor/harbor-core:{{version}}
    container_name: harbor-core
    env_file:
      - ./common/config/core/env
      - ./common/config/core/config_env
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
    volumes:
      - ./common/config/core/app.conf:/etc/core/app.conf:z
      - ./common/config/core/certificates/:/etc/core/certificates/:z
      - {{data_volume}}/secret/core/private_key.pem:/etc/core/private_key.pem:z
      - {{data_volume}}/secret/keys/secretkey:/etc/core/key:z
      - {{data_volume}}/ca_download/:/etc/core/ca/:z
      - {{data_volume}}/psc/:/etc/core/token/:z
      - {{data_volume}}/:/data/:z
    networks:
      harbor:
      {% if with_notary %}
      harbor-notary:
      {% endif %}
      {% if with_clair %}
      harbor-clair:
        aliases:
          - harbor-core
      {% endif %}
      {% if with_chartmuseum %}
      harbor-chartmuseum:
        aliases:
          - harbor-core
      {% endif %}
    dns_search: .
    depends_on:
      - log
      - registry
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "core"
  portal:
    image: goharbor/harbor-portal:{{version}}
    container_name: harbor-portal
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
    networks:
      - harbor
    dns_search: .
    depends_on:
      - log
      - core
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "portal"

  jobservice:
    image: goharbor/harbor-jobservice:{{version}}
    container_name: harbor-jobservice
    env_file:
      - ./common/config/jobservice/env
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - {{data_volume}}/job_logs:/var/log/jobs:z
      - ./common/config/jobservice/config.yml:/etc/jobservice/config.yml:z
    networks:
      - harbor
      {% if with_clair %}
      - harbor-clair
      {% endif %}
    dns_search: .
    depends_on:
      - redis
      - core
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "jobservice"
  redis:
    image: goharbor/redis-photon:{{redis_version}}
    container_name: redis
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    volumes:
      - {{data_volume}}/redis:/var/lib/redis
    networks:
      harbor:
      {% if with_chartmuseum %}
      harbor-chartmuseum:
        aliases:
          - redis
      {% endif %}
    dns_search: .
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "redis"
  proxy:
    image: goharbor/nginx-photon:{{version}}
    container_name: nginx
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
    volumes:
      - ./common/config/nginx:/etc/nginx:z
      {% if protocol == 'https' %}
      - {{data_volume}}/secret/nginx/server.key:/etc/nginx/cert/server.key
      - {{data_volume}}/secret/nginx/server.crt:/etc/nginx/cert/server.crt
      {% endif %}
    networks:
      - harbor
      {% if with_notary %}
      - harbor-notary
      {% endif %}
    dns_search: .
    ports:
      - 80:80
      - 443:443
      - 4443:4443
    depends_on:
      - postgresql
      - registry
      - core
      - portal
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "proxy"
{% if with_notary %}
  notary-server:
    image: goharbor/notary-server-photon:{{notary_version}}
    container_name: notary-server
    restart: always
    networks:
      - notary-sig
      - harbor-notary
    dns_search: .
    volumes:
      - ./common/config/notary:/etc/notary:z
      - {{data_volume}}/secret/notary/notary-signer-ca.crt:/etc/notary/notary-signer-ca.crt:z
      - {{data_volume}}/secret/registry/root.crt:/etc/notary/root.crt:z
    env_file:
      - ./common/config/notary/server_env
    depends_on:
      - postgresql
      - notary-signer
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "notary-server"
  notary-signer:
    image: goharbor/notary-signer-photon:{{notary_version}}
    container_name: notary-signer
    restart: always
    networks:
      harbor-notary:
      notary-sig:
        aliases:
          - notarysigner
    dns_search: .
    volumes:
      - ./common/config/notary:/etc/notary:z
      - {{data_volume}}/secret/notary/notary-signer.crt:/etc/notary/notary-signer.crt:z
      - {{data_volume}}/secret/notary/notary-signer.key:/etc/notary/notary-signer.key:z
    env_file:
      - ./common/config/notary/signer_env
    depends_on:
      - postgresql
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "notary-signer"
{% endif %}
{% if with_clair %}
  clair:
    networks:
      - harbor-clair
    container_name: clair
    image: goharbor/clair-photon:{{clair_version}}
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    cpu_quota: 50000
    dns_search: .
    depends_on:
      - postgresql
    volumes:
      - ./common/config/clair/config.yaml:/etc/clair/config.yaml:z
      {%if registry_custom_ca_bundle_storage_path %}
      - {{data_volume}}/secret/common/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
      {% endif %}
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "clair"
    env_file:
      ./common/config/clair/clair_env
{% endif %}
{% if with_chartmuseum %}
  chartmuseum:
    container_name: chartmuseum
    image: goharbor/chartmuseum-photon:{{chartmuseum_version}}
    restart: always
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - DAC_OVERRIDE
      - SETGID
      - SETUID
    networks:
      - harbor-chartmuseum
    dns_search: .
    depends_on:
      - redis
    volumes:
      - {{data_volume}}/chart_storage:/chart_storage:z
      - ./common/config/chartserver:/etc/chartserver:z
      {%if registry_custom_ca_bundle_storage_path %}
      - {{data_volume}}/secret/common/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
      {% endif %}
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "chartmuseum"
    env_file:
      ./common/config/chartserver/env
{% endif %}
networks:
  harbor:
    external: false
  {% if with_notary %}
  harbor-notary:
    external: false
  notary-sig:
    external: false
  {% endif %}
  {% if with_clair %}
  harbor-clair:
    external: false
  {% endif %}
  {% if with_chartmuseum %}
  harbor-chartmuseum:
    external: false
  {% endif %}
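The {% if %} guards above mean a service or network block only survives rendering when the matching --with-* flag is passed through to the template context. A minimal sketch with an excerpt of the networks section (flag values illustrative):

import jinja2

# excerpt of the networks section from the compose template above
compose_excerpt = """\
networks:
  harbor:
    external: false
{% if with_clair %}
  harbor-clair:
    external: false
{% endif %}
"""

# trim/lstrip keep the rendered YAML free of blank lines left by {% %} tags;
# with_clair=False would drop the harbor-clair block entirely
env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True)
print(env.from_string(compose_excerpt).render(
    with_clair=True, with_notary=False, with_chartmuseum=False))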
@@ -13,12 +13,12 @@ port: 8080
 #Worker pool
 worker_pool:
   #Worker concurrency
-  workers: $max_job_workers
+  workers: {{max_job_workers}}
   backend: "redis"
   #Additional config if use 'redis' backend
   redis_pool:
     #redis://[arbitrary_username:password@]ipaddress:port/database_index
-    redis_url: $redis_url
+    redis_url: {{redis_url}}
     namespace: "harbor_job_service_namespace"
 #Loggers for the running job
 job_loggers:
3 make/photon/prepare/templates/jobservice/env.jinja Normal file
@@ -0,0 +1,3 @@
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
CORE_URL={{core_url}}
@@ -1,6 +1,6 @@
 /var/log/docker/*.log {
-  rotate $log_rotate_count
-  size $log_rotate_size
+  rotate {{log_rotate_count}}
+  size {{log_rotate_size}}
   copytruncate
   compress
   missingok
@@ -20,10 +20,10 @@ http {
         server portal:80;
     }
 
-    log_format timed_combined '$$remote_addr - '
-        '"$$request" $$status $$body_bytes_sent '
-        '"$$http_referer" "$$http_user_agent" '
-        '$$request_time $$upstream_response_time $$pipe';
+    log_format timed_combined '$remote_addr - '
+        '"$request" $status $body_bytes_sent '
+        '"$http_referer" "$http_user_agent" '
+        '$request_time $upstream_response_time $pipe';
 
     access_log /dev/stdout timed_combined;
 
@@ -38,12 +38,12 @@ http {
 
         location / {
             proxy_pass http://portal/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -51,12 +51,12 @@ http {
 
         location /c/ {
             proxy_pass http://core/c/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -64,12 +64,12 @@ http {
 
         location /api/ {
             proxy_pass http://core/api/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -77,12 +77,12 @@ http {
 
         location /chartrepo/ {
             proxy_pass http://core/chartrepo/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -94,24 +94,24 @@ http {
 
         location /v2/ {
             proxy_pass http://core/v2/;
-            proxy_set_header Host $$http_host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
             proxy_buffering off;
             proxy_request_buffering off;
         }
 
         location /service/ {
             proxy_pass http://core/service/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -21,10 +21,10 @@ http {
         server portal:80;
     }
 
-    log_format timed_combined '$$remote_addr - '
-        '"$$request" $$status $$body_bytes_sent '
-        '"$$http_referer" "$$http_user_agent" '
-        '$$request_time $$upstream_response_time $$pipe';
+    log_format timed_combined '$remote_addr - '
+        '"$request" $status $body_bytes_sent '
+        '"$http_referer" "$http_user_agent" '
+        '$request_time $upstream_response_time $pipe';
 
     access_log /dev/stdout timed_combined;
 
@@ -35,8 +35,8 @@ http {
         # server_name harbordomain.com;
         server_tokens off;
         # SSL
-        ssl_certificate $ssl_cert;
-        ssl_certificate_key $ssl_cert_key;
+        ssl_certificate {{ssl_cert}};
+        ssl_certificate_key {{ssl_cert_key}};
 
         # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
         ssl_protocols TLSv1.1 TLSv1.2;
@@ -55,12 +55,12 @@ http {
 
         location / {
             proxy_pass http://portal/;
-            proxy_set_header Host $$http_host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             # Add Secure flag when serving HTTPS
             proxy_cookie_path / "/; secure";
@@ -71,12 +71,12 @@ http {
 
         location /c/ {
             proxy_pass http://core/c/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -84,12 +84,12 @@ http {
 
         location /api/ {
             proxy_pass http://core/api/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -97,12 +97,12 @@ http {
 
         location /chartrepo/ {
             proxy_pass http://core/chartrepo/;
-            proxy_set_header Host $$host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -114,24 +114,24 @@ http {
 
         location /v2/ {
             proxy_pass http://core/v2/;
-            proxy_set_header Host $$http_host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
             proxy_buffering off;
             proxy_request_buffering off;
         }
 
         location /service/ {
             proxy_pass http://core/service/;
-            proxy_set_header Host $$http_host;
-            proxy_set_header X-Real-IP $$remote_addr;
-            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+            proxy_set_header Host $http_host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
             # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-            proxy_set_header X-Forwarded-Proto $$scheme;
+            proxy_set_header X-Forwarded-Proto $scheme;
 
             proxy_buffering off;
             proxy_request_buffering off;
@@ -144,6 +144,6 @@ http {
     server {
         listen 80;
         #server_name harbordomain.com;
-        return 308 https://$$host$$request_uri;
+        return 308 https://$host$request_uri;
     }
 }
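The nginx hunks above also explain the doubled dollar signs being dropped: under string.Template, '$' is the substitution character, so a literal nginx variable such as $host had to be escaped as $$host, while Jinja2 only reserves {{ }} and {% %} and leaves '$' untouched. A small sketch contrasting the two (values illustrative):

from string import Template
import jinja2

# old style: '$$host' renders to a literal '$host', '$ssl_cert' is substituted
old = Template('proxy_set_header Host $$host;\nssl_certificate $ssl_cert;')
print(old.substitute(ssl_cert='/etc/nginx/cert/server.crt'))

# new style: '$host' can be written verbatim; only '{{ssl_cert}}' is substituted
new = jinja2.Environment().from_string(
    'proxy_set_header Host $host;\nssl_certificate {{ssl_cert}};')
print(new.render(ssl_cert='/etc/nginx/cert/server.crt'))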
@@ -2,8 +2,8 @@
     listen 4443 ssl;
     server_tokens off;
     # SSL
-    ssl_certificate $ssl_cert;
-    ssl_certificate_key $ssl_cert_key;
+    ssl_certificate {{ssl_cert}};
+    ssl_certificate_key {{ssl_cert_key}};
 
     # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
     ssl_protocols TLSv1.1 TLSv1.2;
@@ -19,12 +19,12 @@
 
     location /v2/ {
         proxy_pass http://notary-server/v2/;
-        proxy_set_header Host $$http_host;
-        proxy_set_header X-Real-IP $$remote_addr;
-        proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
+        proxy_set_header Host $http_host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
         # When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
-        proxy_set_header X-Forwarded-Proto $$scheme;
+        proxy_set_header X-Forwarded-Proto $scheme;
 
         proxy_buffering off;
         proxy_request_buffering off;
@@ -1,4 +1,3 @@
-
 upstream notary-server {
     server notary-server:4443;
 }
@@ -19,7 +19,7 @@
   "auth": {
     "type": "token",
     "options": {
-      "realm": "$token_endpoint/service/token",
+      "realm": "{{token_endpoint}}/service/token",
       "service": "harbor-notary",
       "issuer": "harbor-token-issuer",
       "rootcertbundle": "/etc/notary/root.crt"
@@ -1,3 +1,3 @@
-NOTARY_SIGNER_DEFAULTALIAS=$alias
+NOTARY_SIGNER_DEFAULTALIAS={{alias}}
 MIGRATIONS_PATH=migrations/signer/postgresql
 DB_URL=postgres://signer:password@postgresql:5432/notarysigner?sslmode=disable
@@ -6,16 +6,16 @@ log:
 storage:
   cache:
     layerinfo: redis
-  $storage_provider_info
+  {{storage_provider_info}}
   maintenance:
     uploadpurging:
       enabled: false
   delete:
     enabled: true
 redis:
-  addr: $redis_host:$redis_port
-  password: $redis_password
-  db: $redis_db_index_reg
+  addr: {{redis_host}}:{{redis_port}}
+  password: {{redis_password}}
+  db: {{redis_db_index_reg}}
 http:
   addr: :5000
   secret: placeholder
@@ -24,7 +24,7 @@ http:
 auth:
   token:
     issuer: harbor-token-issuer
-    realm: $public_url/service/token
+    realm: {{public_url}}/service/token
     rootcertbundle: /etc/registry/root.crt
     service: harbor-registry
 validation:
@@ -33,7 +33,7 @@ notifications:
   endpoints:
     - name: harbor
      disabled: false
-     url: $core_url/service/notifications
+     url: {{core_url}}/service/notifications
      timeout: 3000ms
      threshold: 5
      backoff: 1s
3 make/photon/prepare/templates/registryctl/env.jinja Normal file
@@ -0,0 +1,3 @@
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}

0 make/photon/prepare/utils/__init__.py Normal file
142 make/photon/prepare/utils/cert.py Normal file
@@ -0,0 +1,142 @@
# Get or generate private key
import os, sys, subprocess, shutil
from pathlib import Path
from subprocess import DEVNULL
from functools import wraps

from .misc import mark_file
from .misc import generate_random_string

SSL_CERT_PATH = os.path.join("/etc/nginx/cert", "server.crt")
SSL_CERT_KEY_PATH = os.path.join("/etc/nginx/cert", "server.key")

input_cert = '/input/nginx/server.crt'
input_cert_key = '/input/nginx/server.key'

secret_cert_dir = '/secret/nginx'
secret_cert = '/secret/nginx/server.crt'
secret_cert_key = '/secret/nginx/server.key'

input_secret_keys_dir = '/input/keys'
secret_keys_dir = '/secret/keys'
allowed_secret_key_names = ['defaultalias', 'secretkey']

def _get_secret(folder, filename, length=16):
    key_file = os.path.join(folder, filename)
    if os.path.isfile(key_file):
        with open(key_file, 'r') as f:
            key = f.read()
            print("loaded secret from file: %s" % key_file)
        mark_file(key_file)
        return key
    if not os.path.isdir(folder):
        os.makedirs(folder)
    key = generate_random_string(length)
    with open(key_file, 'w') as f:
        f.write(key)
        print("Generated and saved secret to file: %s" % key_file)
    mark_file(key_file)
    return key


def get_secret_key(path):
    secret_key = _get_secret(path, "secretkey")
    if len(secret_key) != 16:
        raise Exception("secret key's length has to be 16 chars, current length: %d" % len(secret_key))
    return secret_key


def get_alias(path):
    alias = _get_secret(path, "defaultalias", length=8)
    return alias

def copy_secret_keys():
    """
    Copy the secret keys, which are used to encrypt user passwords, from the input keys dir to the secret keys dir
    """
    if os.path.isdir(input_secret_keys_dir) and os.path.isdir(secret_keys_dir):
        input_files = os.listdir(input_secret_keys_dir)
        secret_files = os.listdir(secret_keys_dir)
        files_need_copy = [x for x in input_files if (x in allowed_secret_key_names) and (x not in secret_files)]
        for f in files_need_copy:
            # copy by full path; os.listdir returns bare file names
            shutil.copy(os.path.join(input_secret_keys_dir, f), secret_keys_dir)

def copy_ssl_cert():
    """
    Copy the SSL cert/key pair, which is used as the nginx SSL certificate, from the input dir to the secret cert dir
    """
    if os.path.isfile(input_cert_key) and os.path.isfile(input_cert):
        os.makedirs(secret_cert_dir, exist_ok=True)
        shutil.copy(input_cert, secret_cert)
        shutil.copy(input_cert_key, secret_cert_key)

## decorator actions
def stat_decorator(func):
    @wraps(func)
    def check_wrapper(*args, **kw):
        stat = func(*args, **kw)
        if stat == 0:
            print("Generated certificate, key file: {key_path}, cert file: {cert_path}".format(**kw))
        else:
            print("Failed to generate key file: {key_path}, cert file: {cert_path}".format(**kw))
            sys.exit(1)
    return check_wrapper


@stat_decorator
def create_root_cert(subj, key_path="./k.key", cert_path="./cert.crt"):
    rc = subprocess.call(["/usr/bin/openssl", "genrsa", "-out", key_path, "4096"], stdout=DEVNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["/usr/bin/openssl", "req", "-new", "-x509", "-key", key_path,
                            "-out", cert_path, "-days", "3650", "-subj", subj], stdout=DEVNULL, stderr=subprocess.STDOUT)

@stat_decorator
def create_cert(subj, ca_key, ca_cert, key_path="./k.key", cert_path="./cert.crt"):
    cert_dir = os.path.dirname(cert_path)
    csr_path = os.path.join(cert_dir, "tmp.csr")
    rc = subprocess.call(["/usr/bin/openssl", "req", "-newkey", "rsa:4096", "-nodes", "-sha256", "-keyout", key_path,
                          "-out", csr_path, "-subj", subj], stdout=DEVNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["/usr/bin/openssl", "x509", "-req", "-days", "3650", "-in", csr_path, "-CA",
                            ca_cert, "-CAkey", ca_key, "-CAcreateserial", "-out", cert_path], stdout=DEVNULL, stderr=subprocess.STDOUT)


def openssl_installed():
    # subprocess.call (not check_call) so that a missing openssl yields a
    # nonzero status here instead of raising CalledProcessError
    shell_stat = subprocess.call(["/usr/bin/which", "openssl"], stdout=DEVNULL, stderr=subprocess.STDOUT)
    if shell_stat != 0:
        print("Cannot find openssl installed on this machine\nUse default SSL certificate file")
        return False
    return True


def prepare_ca(
    private_key_pem_path: Path,
    root_crt_path: Path,
    old_private_key_pem_path: Path,
    old_crt_path: Path,
    registry_custom_ca_bundle_config: Path,
    registry_custom_ca_bundle_storage_path: Path):
    if not (private_key_pem_path.exists() and root_crt_path.exists()):
        # From version 1.8 the cert storage path is changed:
        # if the old key pair does not exist, create a new one;
        # if the old key pair exists in the old place, move it to the new place
        if not (old_crt_path.exists() and old_private_key_pem_path.exists()):
            private_key_pem_path.parent.mkdir(parents=True, exist_ok=True)
            root_crt_path.parent.mkdir(parents=True, exist_ok=True)

            empty_subj = "/"
            create_root_cert(empty_subj, key_path=private_key_pem_path, cert_path=root_crt_path)
            mark_file(private_key_pem_path)
            mark_file(root_crt_path)
        else:
            shutil.move(old_crt_path, root_crt_path)
            shutil.move(old_private_key_pem_path, private_key_pem_path)

    if not registry_custom_ca_bundle_storage_path.exists() and registry_custom_ca_bundle_config.exists():
        registry_custom_ca_bundle_storage_path.parent.mkdir(parents=True, exist_ok=True)
        shutil.copyfile(registry_custom_ca_bundle_config, registry_custom_ca_bundle_storage_path)
        mark_file(registry_custom_ca_bundle_storage_path)
        print("Copied custom ca bundle: %s" % registry_custom_ca_bundle_config)
121 make/photon/prepare/utils/chart.py Normal file
@@ -0,0 +1,121 @@
import os, shutil

from g import templates_dir, config_dir
from .jinja import render_jinja

chartm_temp_dir = os.path.join(templates_dir, "chartserver")
chartm_env_temp = os.path.join(chartm_temp_dir, "env.jinja")

chartm_config_dir = os.path.join(config_dir, "chartserver")
chartm_env = os.path.join(config_dir, "chartserver", "env")

def prepare_chartmuseum(config_dict):

    core_secret = config_dict['core_secret']
    registry_custom_ca_bundle_path = config_dict['registry_custom_ca_bundle_path']
    redis_host = config_dict['redis_host']
    redis_port = config_dict['redis_port']
    redis_password = config_dict['redis_password']
    redis_db_index_chart = config_dict['redis_db_index_chart']
    storage_provider_config = config_dict['storage_provider_config']
    storage_provider_name = config_dict['storage_provider_name']

    if not os.path.isdir(chartm_config_dir):
        print("Create config folder: %s" % chartm_config_dir)
        os.makedirs(chartm_config_dir)

    # handle custom ca bundle
    if len(registry_custom_ca_bundle_path) > 0 and os.path.isfile(registry_custom_ca_bundle_path):
        shutil.copyfile(registry_custom_ca_bundle_path, os.path.join(chartm_config_dir, "custom-ca-bundle.crt"))
        print("Copied custom ca bundle: %s" % os.path.join(chartm_config_dir, "custom-ca-bundle.crt"))

    # process redis info
    cache_store = "redis"
    cache_redis_password = redis_password
    cache_redis_addr = "{}:{}".format(redis_host, redis_port)
    cache_redis_db_index = redis_db_index_chart

    # process storage info
    # default: use the local filesystem
    storage_driver = "local"
    # storage provider configurations;
    # please be aware that we do not validate the values for the specified keys.
    # convert the configs to a config map
    storage_provider_configs = storage_provider_config.split(",")
    storage_provider_config_map = {}
    storage_provider_config_options = []

    for k_v in storage_provider_configs:
        if len(k_v) > 0:
            kvs = k_v.split(": ")  # split on colon plus space to tolerate an existing ":" in the value
            if len(kvs) == 2:
                # key must not be empty
                if kvs[0].strip() != "":
                    storage_provider_config_map[kvs[0].strip()] = kvs[1].strip()

    if storage_provider_name == "s3":
        # aws s3 storage
        storage_driver = "amazon"
        storage_provider_config_options.append("STORAGE_AMAZON_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_ENDPOINT=%s" % storage_provider_config_map.get("regionendpoint", ""))
        storage_provider_config_options.append("AWS_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskey", ""))
        storage_provider_config_options.append("AWS_SECRET_ACCESS_KEY=%s" % storage_provider_config_map.get("secretkey", ""))
    elif storage_provider_name == "gcs":
        # google cloud storage
        storage_driver = "google"
        storage_provider_config_options.append("STORAGE_GOOGLE_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_GOOGLE_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))

        keyFileOnHost = storage_provider_config_map.get("keyfile", "")
        if os.path.isfile(keyFileOnHost):
            shutil.copyfile(keyFileOnHost, os.path.join(chartm_config_dir, "gcs.key"))
            targetKeyFile = "/etc/chartserver/gcs.key"
            storage_provider_config_options.append("GOOGLE_APPLICATION_CREDENTIALS=%s" % targetKeyFile)
    elif storage_provider_name == "azure":
        # azure storage
        storage_driver = "microsoft"
        storage_provider_config_options.append("STORAGE_MICROSOFT_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCOUNT=%s" % storage_provider_config_map.get("accountname", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCESS_KEY=%s" % storage_provider_config_map.get("accountkey", ""))
        storage_provider_config_options.append("STORAGE_MICROSOFT_PREFIX=/azure/harbor/charts")
    elif storage_provider_name == "swift":
        # open stack swift
        storage_driver = "openstack"
        storage_provider_config_options.append("STORAGE_OPENSTACK_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("OS_AUTH_URL=%s" % storage_provider_config_map.get("authurl", ""))
        storage_provider_config_options.append("OS_USERNAME=%s" % storage_provider_config_map.get("username", ""))
        storage_provider_config_options.append("OS_PASSWORD=%s" % storage_provider_config_map.get("password", ""))
        storage_provider_config_options.append("OS_PROJECT_ID=%s" % storage_provider_config_map.get("tenantid", ""))
        storage_provider_config_options.append("OS_PROJECT_NAME=%s" % storage_provider_config_map.get("tenant", ""))
        storage_provider_config_options.append("OS_DOMAIN_ID=%s" % storage_provider_config_map.get("domainid", ""))
        storage_provider_config_options.append("OS_DOMAIN_NAME=%s" % storage_provider_config_map.get("domain", ""))
    elif storage_provider_name == "oss":
        # aliyun OSS
        storage_driver = "alibaba"
        storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % storage_provider_config_map.get("endpoint", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskeyid", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % storage_provider_config_map.get("accesskeysecret", ""))
    else:
        # use local file system
        storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")

    # generate storage provider configuration
    all_storage_provider_configs = ('\n').join(storage_provider_config_options)

    render_jinja(
        chartm_env_temp,
        chartm_env,
        cache_store=cache_store,
        cache_redis_addr=cache_redis_addr,
        cache_redis_password=cache_redis_password,
        cache_redis_db_index=cache_redis_db_index,
        core_secret=core_secret,
        storage_driver=storage_driver,
        all_storage_driver_configs=all_storage_provider_configs)
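The "key: value" parsing convention above is worth calling out: entries are comma-separated, and each entry is split on a colon followed by a space, so values containing a bare colon (registry endpoints, URLs) pass through intact. A standalone sketch with illustrative values:

# standalone sketch of the parsing loop above; values are illustrative
storage_provider_config = (
    "accesskey: my-access-key, secretkey: my-secret, "
    "regionendpoint: http://minio:9000")

config_map = {}
for k_v in storage_provider_config.split(","):
    kvs = k_v.split(": ")  # colon-plus-space, so 'minio:9000' stays whole
    if len(kvs) == 2 and kvs[0].strip():
        config_map[kvs[0].strip()] = kvs[1].strip()

print(config_map)
# -> {'accesskey': 'my-access-key', 'secretkey': 'my-secret',
#     'regionendpoint': 'http://minio:9000'}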
48
make/photon/prepare/utils/clair.py
Normal file
48
make/photon/prepare/utils/clair.py
Normal file
@ -0,0 +1,48 @@
import os, shutil

from g import templates_dir, config_dir, DEFAULT_UID, DEFAULT_GID
from .jinja import render_jinja
from .misc import prepare_config_dir

clair_template_dir = os.path.join(templates_dir, "clair")

def prepare_clair(config_dict):
    clair_config_dir = prepare_config_dir(config_dir, "clair")

    if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
        print("Copying offline data file for clair DB")
        shutil.rmtree(os.path.join(clair_config_dir, "postgresql-init.d"))

    shutil.copytree(os.path.join(clair_template_dir, "postgresql-init.d"), os.path.join(clair_config_dir, "postgresql-init.d"))

    postgres_env_path = os.path.join(clair_config_dir, "postgres_env")
    postgres_env_template = os.path.join(clair_template_dir, "postgres_env.jinja")

    clair_config_path = os.path.join(clair_config_dir, "config.yaml")
    clair_config_template = os.path.join(clair_template_dir, "config.yaml.jinja")

    clair_env_path = os.path.join(clair_config_dir, "clair_env")
    clair_env_template = os.path.join(clair_template_dir, "clair_env.jinja")

    render_jinja(
        postgres_env_template,
        postgres_env_path,
        password=config_dict['clair_db_password'])

    render_jinja(
        clair_config_template,
        clair_config_path,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        password=config_dict['clair_db_password'],
        username=config_dict['clair_db_username'],
        host=config_dict['clair_db_host'],
        port=config_dict['clair_db_port'],
        dbname=config_dict['clair_db'],
        interval=config_dict['clair_updaters_interval'])

    # config http proxy for Clair
    render_jinja(
        clair_env_template,
        clair_env_path,
        **config_dict)
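A quick usage sketch for the function above (the dict values are invented; in the real flow they come from parse_yaml_config, and the jinja templates must exist under the prepare image's template directory):

    config = {
        'clair_db_password': 'root123',
        'clair_db_username': 'postgres',
        'clair_db_host': 'postgresql',
        'clair_db_port': 5432,
        'clair_db': 'postgres',
        'clair_updaters_interval': 12,
    }
    prepare_clair(config)  # renders postgres_env, config.yaml and clair_env under config_dir/clair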
234  make/photon/prepare/utils/configs.py  Normal file
@@ -0,0 +1,234 @@
import yaml
from g import versions_file_path
from .misc import generate_random_string

def validate(conf, **kwargs):
    protocol = conf.get("protocol")
    if protocol != "https" and kwargs.get('notary_mode'):
        raise Exception(
            "Error: the protocol must be https when Harbor is deployed with Notary")
    if protocol == "https":
        if not conf.get("cert_path"):
            raise Exception("Error: The protocol is https but attribute ssl_cert is not set")
        if not conf.get("cert_key_path"):
            raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")

    # Storage validate
    valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
    storage_provider_name = conf.get("storage_provider_name")
    if storage_provider_name not in valid_storage_drivers:
        raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (
            storage_provider_name, ",".join(valid_storage_drivers)))

    storage_provider_config = conf.get("storage_provider_config")  # original is registry_storage_provider_config
    if storage_provider_name != "filesystem":
        if storage_provider_config == "":
            raise Exception(
                "Error: no provider configurations are provided for provider %s" % storage_provider_name)

    # Redis validate
    redis_host = conf.get("redis_host")
    if redis_host is None or len(redis_host) < 1:
        raise Exception(
            "Error: redis_host in harbor.yml needs to point to an endpoint of Redis server or cluster.")

    redis_port = conf.get("redis_port")
    if redis_port is None or (redis_port < 1 or redis_port > 65535):
        raise Exception(
            "Error: redis_port in harbor.yml needs to point to the port of Redis server or cluster.")

    redis_db_index = conf.get("redis_db_index")
    if len(redis_db_index.split(",")) != 3:
        raise Exception(
            "Error: invalid value for redis_db_index: %s. Please set it as 1,2,3" % redis_db_index)

def parse_versions():
    if not versions_file_path.is_file():
        return {}
    with open(versions_file_path) as f:
        versions = yaml.load(f)
    return versions

def parse_yaml_config(config_file_path):
    '''
    :param config_file_path: the path of the harbor.yml config file
    :returns: dict of configs
    '''
    with open(config_file_path) as f:
        configs = yaml.load(f)

    config_dict = {}
    config_dict['adminserver_url'] = "http://adminserver:8080"
    config_dict['registry_url'] = "http://registry:5000"
    config_dict['registry_controller_url'] = "http://registryctl:8080"
    config_dict['core_url'] = "http://core:8080"
    config_dict['token_service_url'] = "http://core:8080/service/token"

    config_dict['jobservice_url'] = "http://jobservice:8080"
    config_dict['clair_url'] = "http://clair:6060"
    config_dict['notary_url'] = "http://notary-server:4443"
    config_dict['chart_repository_url'] = "http://chartmuseum:9999"

    if configs.get("reload_config"):
        config_dict['reload_config'] = configs.get("reload_config")
    else:
        config_dict['reload_config'] = "false"

    config_dict['hostname'] = configs.get("hostname")
    config_dict['protocol'] = configs.get("ui_url_protocol")
    config_dict['public_url'] = config_dict['protocol'] + "://" + config_dict['hostname']

    # Data path volume
    config_dict['data_volume'] = configs.get("data_volume")

    # Email related configs
    config_dict['email_identity'] = configs.get("email_identity")
    config_dict['email_host'] = configs.get("email_server")
    config_dict['email_port'] = configs.get("email_server_port")
    config_dict['email_usr'] = configs.get("email_username")
    config_dict['email_pwd'] = configs.get("email_password")
    config_dict['email_from'] = configs.get("email_from")
    config_dict['email_ssl'] = configs.get("email_ssl")
    config_dict['email_insecure'] = configs.get("email_insecure")
    config_dict['harbor_admin_password'] = configs.get("harbor_admin_password")
    config_dict['auth_mode'] = configs.get("auth_mode")
    config_dict['ldap_url'] = configs.get("ldap_url")

    # LDAP related configs
    # these two options are either both set or both unset
    if configs.get("ldap_searchdn"):
        config_dict['ldap_searchdn'] = configs["ldap_searchdn"]
        config_dict['ldap_search_pwd'] = configs["ldap_search_pwd"]
    else:
        config_dict['ldap_searchdn'] = ""
        config_dict['ldap_search_pwd'] = ""
    config_dict['ldap_basedn'] = configs.get("ldap_basedn")
    # ldap_filter is null by default
    if configs.get("ldap_filter"):
        config_dict['ldap_filter'] = configs["ldap_filter"]
    else:
        config_dict['ldap_filter'] = ""
    config_dict['ldap_uid'] = configs.get("ldap_uid")
    config_dict['ldap_scope'] = configs.get("ldap_scope")
    config_dict['ldap_timeout'] = configs.get("ldap_timeout")
    config_dict['ldap_verify_cert'] = configs.get("ldap_verify_cert")
    config_dict['ldap_group_basedn'] = configs.get("ldap_group_basedn")
    config_dict['ldap_group_filter'] = configs.get("ldap_group_filter")
    config_dict['ldap_group_gid'] = configs.get("ldap_group_gid")
    config_dict['ldap_group_scope'] = configs.get("ldap_group_scope")
    # Admin dn
    config_dict['ldap_group_admin_dn'] = configs.get("ldap_group_admin_dn") or ''

    # DB configs
    db_configs = configs.get('database')
    config_dict['db_host'] = db_configs.get("host")
    config_dict['db_port'] = db_configs.get("port")
    config_dict['db_user'] = db_configs.get("username")
    config_dict['db_password'] = db_configs.get("password")

    config_dict['self_registration'] = configs.get("self_registration")
    config_dict['project_creation_restriction'] = configs.get("project_creation_restriction")

    # secure configs
    if config_dict['protocol'] == "https":
        config_dict['cert_path'] = configs.get("ssl_cert")
        config_dict['cert_key_path'] = configs.get("ssl_cert_key")
    config_dict['customize_crt'] = configs.get("customize_crt")
    config_dict['max_job_workers'] = configs.get("max_job_workers")
    config_dict['token_expiration'] = configs.get("token_expiration")

    config_dict['secretkey_path'] = configs["secretkey_path"]
    # Admiral configs
    if configs.get("admiral_url"):
        config_dict['admiral_url'] = configs["admiral_url"]
    else:
        config_dict['admiral_url'] = ""

    # Clair configs
    clair_configs = configs.get("clair")
    if clair_configs:
        config_dict['clair_db_password'] = clair_configs.get("db_password")
        config_dict['clair_db_host'] = clair_configs.get("db_host")
        config_dict['clair_db_port'] = clair_configs.get("db_port")
        config_dict['clair_db_username'] = clair_configs.get("db_username")
        config_dict['clair_db'] = clair_configs.get("db")
        config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval")
        config_dict['clair_http_proxy'] = clair_configs.get('http_proxy')
        config_dict['clair_https_proxy'] = clair_configs.get('https_proxy')
        config_dict['clair_no_proxy'] = clair_configs.get('no_proxy')
    else:
        config_dict['clair_db_password'] = ''
        config_dict['clair_db_host'] = ''
        config_dict['clair_db_port'] = ''
        config_dict['clair_db_username'] = ''
        config_dict['clair_db'] = ''
        config_dict['clair_updaters_interval'] = ''
        config_dict['clair_http_proxy'] = ''
        config_dict['clair_https_proxy'] = ''
        config_dict['clair_no_proxy'] = ''

    # UAA configs
    config_dict['uaa_endpoint'] = configs.get("uaa_endpoint")
    config_dict['uaa_clientid'] = configs.get("uaa_clientid")
    config_dict['uaa_clientsecret'] = configs.get("uaa_clientsecret")
    config_dict['uaa_verify_cert'] = configs.get("uaa_verify_cert")
    config_dict['uaa_ca_cert'] = configs.get("uaa_ca_cert")

    # Log configs
    log_configs = configs.get('log') or {}
    config_dict['log_location'] = log_configs.get("location")
    config_dict['log_rotate_count'] = log_configs.get("rotate_count")
    config_dict['log_rotate_size'] = log_configs.get("rotate_size")

    # Redis configs
    redis_configs = configs.get("redis")
    if redis_configs:
        config_dict['redis_host'] = redis_configs.get("host") or ''
        config_dict['redis_port'] = redis_configs.get("port") or ''
        config_dict['redis_password'] = redis_configs.get("password") or ''
        config_dict['redis_db_index'] = redis_configs.get("db_index") or ''
        db_indexs = config_dict['redis_db_index'].split(',')
        config_dict['redis_db_index_reg'] = db_indexs[0]
        config_dict['redis_db_index_js'] = db_indexs[1]
        config_dict['redis_db_index_chart'] = db_indexs[2]
    else:
        config_dict['redis_host'] = ''
        config_dict['redis_port'] = ''
        config_dict['redis_password'] = ''
        config_dict['redis_db_index'] = ''
        config_dict['redis_db_index_reg'] = ''
        config_dict['redis_db_index_js'] = ''
        config_dict['redis_db_index_chart'] = ''

    # redis://[arbitrary_username:password@]ipaddress:port/database_index
    if config_dict.get('redis_password'):
        config_dict['redis_url_js'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
        config_dict['redis_url_reg'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])
    else:
        config_dict['redis_url_js'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
        config_dict['redis_url_reg'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])

    if configs.get("skip_reload_env_pattern"):
        config_dict['skip_reload_env_pattern'] = configs["skip_reload_env_pattern"]
    else:
        config_dict['skip_reload_env_pattern'] = "$^"

    # Registry storage configs
    storage_config = configs.get('storage')
    if storage_config:
        config_dict['storage_provider_name'] = storage_config.get("registry_storage_provider_name") or ''
        config_dict['storage_provider_config'] = storage_config.get("registry_storage_provider_config") or ''
        # yaml requires 1 or more spaces between the key and value
        config_dict['storage_provider_config'] = config_dict['storage_provider_config'].replace(":", ": ", 1)
        config_dict['registry_custom_ca_bundle_path'] = storage_config.get("registry_custom_ca_bundle") or ''
    else:
        config_dict['storage_provider_name'] = ''
        config_dict['storage_provider_config'] = ''
        config_dict['registry_custom_ca_bundle_path'] = ''

    # auto-generate secret strings
    config_dict['core_secret'] = generate_random_string(16)
    config_dict['jobservice_secret'] = generate_random_string(16)

    return config_dict
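A minimal end-to-end sketch of parse_yaml_config (the YAML below is a stripped-down, invented harbor.yml carrying only the keys the parser requires; note that secretkey_path and the database section are accessed without .get(), so they must be present):

    import tempfile, textwrap

    yml = textwrap.dedent('''\
        hostname: harbor.example.com
        ui_url_protocol: http
        data_volume: /data
        secretkey_path: /data
        database:
          host: postgresql
          port: 5432
          username: postgres
          password: root123
    ''')
    with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as f:
        f.write(yml)
    cfg = parse_yaml_config(f.name)
    print(cfg['public_url'])    # http://harbor.example.com
    print(cfg['redis_url_js'])  # redis://:/  (empty pieces, since no redis section was given)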
56  make/photon/prepare/utils/core.py  Normal file
@@ -0,0 +1,56 @@
import shutil, os

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir, generate_random_string
from utils.jinja import render_jinja

core_config_dir = os.path.join(config_dir, "core", "certificates")
core_env_template_path = os.path.join(templates_dir, "core", "env.jinja")
core_conf_env = os.path.join(config_dir, "core", "env")
core_conf_template_path = os.path.join(templates_dir, "core", "app.conf.jinja")
core_conf = os.path.join(config_dir, "core", "app.conf")

core_config_env_template = os.path.join(templates_dir, "core", "config_env.jinja")
core_config_env = os.path.join(config_dir, "core", "config_env")

def prepare_core(config_dict, with_notary, with_clair, with_chartmuseum):
    prepare_core_config_dir()
    # Render Core
    # set the cache for the chart repo server:
    # default to 'memory' mode; if Redis is configured, use 'redis'
    if len(config_dict['redis_host']) > 0:
        chart_cache_driver = "redis"
    else:
        chart_cache_driver = "memory"

    render_config_env(config_dict, with_notary, with_clair, with_chartmuseum)

    render_jinja(
        core_env_template_path,
        core_conf_env,
        chart_cache_driver=chart_cache_driver,
        **config_dict)

    # Copy Core app.conf
    copy_core_config(core_conf_template_path, core_conf)

def prepare_core_config_dir():
    prepare_config_dir(core_config_dir)

def copy_core_config(core_templates_path, core_config_path):
    shutil.copyfile(core_templates_path, core_config_path)
    print("Generated configuration file: %s" % core_config_path)

def render_config_env(config_dict, with_notary, with_clair, with_chartmuseum):
    # Use reload_key to avoid reloading the config after Harbor restarts
    reload_key = generate_random_string(6) if config_dict['reload_config'] == "true" else ""

    render_jinja(
        core_config_env_template,
        core_config_env,
        with_notary=with_notary,
        with_clair=with_clair,
        with_chartmuseum=with_chartmuseum,
        reload_key=reload_key,
        **config_dict
    )
20  make/photon/prepare/utils/db.py  Normal file
@@ -0,0 +1,20 @@
import os

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

db_config_dir = os.path.join(config_dir, "db")
db_env_template_path = os.path.join(templates_dir, "db", "env.jinja")
db_conf_env = os.path.join(config_dir, "db", "env")

def prepare_db(config_dict):
    prepare_db_config_dir()

    render_jinja(
        db_env_template_path,
        db_conf_env,
        db_password=config_dict['db_password'])

def prepare_db_config_dir():
    prepare_config_dir(db_config_dir)
37  make/photon/prepare/utils/docker_compose.py  Normal file
@@ -0,0 +1,37 @@
import os

from g import templates_dir
from .configs import parse_versions
from .jinja import render_jinja

docker_compose_template_path = os.path.join(templates_dir, 'docker_compose', 'docker-compose.yml.jinja')
docker_compose_yml_path = '/compose_location/docker-compose.yml'

# render docker-compose
def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
    versions = parse_versions()
    VERSION_TAG = versions.get('VERSION_TAG') or 'dev'
    REGISTRY_VERSION = versions.get('REGISTRY_VERSION') or 'v2.7.1'
    NOTARY_VERSION = versions.get('NOTARY_VERSION') or 'v0.6.1'
    CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.7'
    CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.8.1'

    rendering_variables = {
        'version': VERSION_TAG,
        'reg_version': "{}-{}".format(REGISTRY_VERSION, VERSION_TAG),
        'redis_version': VERSION_TAG,
        'notary_version': '{}-{}'.format(NOTARY_VERSION, VERSION_TAG),
        'clair_version': '{}-{}'.format(CLAIR_VERSION, VERSION_TAG),
        'chartmuseum_version': '{}-{}'.format(CHARTMUSEUM_VERSION, VERSION_TAG),
        'data_volume': configs['data_volume'],
        'log_location': configs['log_location'],
        # the cert paths are only present in the config dict when the protocol
        # is https, so use .get() to avoid a KeyError on plain-http deployments
        'cert_key_path': configs.get('cert_key_path'),
        'cert_path': configs.get('cert_path'),
        'protocol': configs['protocol'],
        'registry_custom_ca_bundle_storage_path': configs['registry_custom_ca_bundle_path'],
        'with_notary': with_notary,
        'with_clair': with_clair,
        'with_chartmuseum': with_chartmuseum
    }

    render_jinja(docker_compose_template_path, docker_compose_yml_path, **rendering_variables)
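The fallback-and-compose logic above is easy to check in isolation; with no versions file the defaults produce tags like this:

    versions = {}  # parse_versions() returns {} when the versions file is absent
    VERSION_TAG = versions.get('VERSION_TAG') or 'dev'
    REGISTRY_VERSION = versions.get('REGISTRY_VERSION') or 'v2.7.1'
    print("{}-{}".format(REGISTRY_VERSION, VERSION_TAG))  # v2.7.1-dev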
11  make/photon/prepare/utils/jinja.py  Normal file
@@ -0,0 +1,11 @@
from jinja2 import Environment, FileSystemLoader
from .misc import mark_file

jinja_env = Environment(loader=FileSystemLoader('/'), trim_blocks=True)

def render_jinja(src, dest, mode=0o640, uid=0, gid=0, **kw):
    t = jinja_env.get_template(src)
    with open(dest, 'w') as f:
        f.write(t.render(**kw))
    mark_file(dest, mode, uid, gid)
    print("Generated configuration file: %s" % dest)
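A short usage sketch (paths invented). Because the environment's FileSystemLoader is rooted at '/', src is effectively an absolute path to the template file:

    with open('/tmp/greeting.jinja', 'w') as f:
        f.write('Hello {{ name }}!')
    render_jinja('/tmp/greeting.jinja', '/tmp/greeting.txt', name='Harbor')
    # /tmp/greeting.txt now contains "Hello Harbor!" and was chmod'ed to 0o640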
34  make/photon/prepare/utils/jobservice.py  Normal file
@@ -0,0 +1,34 @@
import os

from g import config_dir, DEFAULT_GID, DEFAULT_UID, templates_dir
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

job_config_dir = os.path.join(config_dir, "jobservice")
job_service_env_template_path = os.path.join(templates_dir, "jobservice", "env.jinja")
job_service_conf_env = os.path.join(config_dir, "jobservice", "env")
job_service_conf_template_path = os.path.join(templates_dir, "jobservice", "config.yml.jinja")
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")


def prepare_job_service(config_dict):
    prepare_config_dir(job_config_dir)

    # Job log is stored in data dir
    job_log_dir = os.path.join('/data', "job_logs")
    prepare_config_dir(job_log_dir)

    # Render Jobservice env
    render_jinja(
        job_service_env_template_path,
        job_service_conf_env,
        **config_dict)

    # Render Jobservice config
    render_jinja(
        job_service_conf_template_path,
        jobservice_conf,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        max_job_workers=config_dict['max_job_workers'],
        redis_url=config_dict['redis_url_js'])
20  make/photon/prepare/utils/log.py  Normal file
@@ -0,0 +1,20 @@
import os

from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

log_config_dir = os.path.join(config_dir, "log")
logrotate_template_path = os.path.join(templates_dir, "log", "logrotate.conf.jinja")
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")

def prepare_log_configs(config_dict):
    prepare_config_dir(log_config_dir)

    # Render Log config
    render_jinja(
        logrotate_template_path,
        log_rotate_config,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        **config_dict)
105  make/photon/prepare/utils/misc.py  Normal file
@@ -0,0 +1,105 @@
import os
import string
import random

from g import DEFAULT_UID, DEFAULT_GID


# To meet security requirements:
# by default, change the file mode to 0600 and set the owner of the file to 10000:10000
def mark_file(path, mode=0o600, uid=DEFAULT_UID, gid=DEFAULT_GID):
    if mode > 0:
        os.chmod(path, mode)
    if uid > 0 and gid > 0:
        os.chown(path, uid, gid)


def validate(conf, **kwargs):
    # Protocol validate
    protocol = conf.get("configuration", "ui_url_protocol")
    if protocol != "https" and kwargs.get('notary_mode'):
        raise Exception(
            "Error: the protocol must be https when Harbor is deployed with Notary")
    if protocol == "https":
        if not conf.has_option("configuration", "ssl_cert"):
            raise Exception(
                "Error: The protocol is https but attribute ssl_cert is not set")
        cert_path = conf.get("configuration", "ssl_cert")
        if not os.path.isfile(cert_path):
            raise Exception(
                "Error: The path for certificate: %s is invalid" % cert_path)
        if not conf.has_option("configuration", "ssl_cert_key"):
            raise Exception(
                "Error: The protocol is https but attribute ssl_cert_key is not set")
        cert_key_path = conf.get("configuration", "ssl_cert_key")
        if not os.path.isfile(cert_key_path):
            raise Exception(
                "Error: The path for certificate key: %s is invalid" % cert_key_path)

    # Project validate
    project_creation = conf.get(
        "configuration", "project_creation_restriction")
    if project_creation != "everyone" and project_creation != "adminonly":
        raise Exception(
            "Error: invalid value for project_creation_restriction: %s" % project_creation)

    # Storage validate
    valid_storage_drivers = ["filesystem",
                             "azure", "gcs", "s3", "swift", "oss"]
    storage_provider_name = conf.get(
        "configuration", "registry_storage_provider_name").strip()
    if storage_provider_name not in valid_storage_drivers:
        raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (
            storage_provider_name, ",".join(valid_storage_drivers)))

    storage_provider_config = conf.get(
        "configuration", "registry_storage_provider_config").strip()
    if storage_provider_name != "filesystem":
        if storage_provider_config == "":
            raise Exception(
                "Error: no provider configurations are provided for provider %s" % storage_provider_name)

    # Redis validate
    redis_host = conf.get("configuration", "redis_host")
    if redis_host is None or len(redis_host) < 1:
        raise Exception(
            "Error: redis_host in harbor.cfg needs to point to an endpoint of Redis server or cluster.")

    redis_port = conf.get("configuration", "redis_port")
    if len(redis_port) < 1:
        raise Exception(
            "Error: redis_port in harbor.cfg needs to point to the port of Redis server or cluster.")

    redis_db_index = conf.get("configuration", "redis_db_index").strip()
    if len(redis_db_index.split(",")) != 3:
        raise Exception(
            "Error: invalid value for redis_db_index: %s. Please set it as 1,2,3" % redis_db_index)


def validate_crt_subj(dirty_subj):
    subj_list = [item for item in dirty_subj.strip().split("/") \
        if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0]
    return "/" + "/".join(subj_list)


def generate_random_string(length):
    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))


def prepare_config_dir(root, *name):
    absolute_path = os.path.join(root, *name)
    if not os.path.exists(absolute_path):
        os.makedirs(absolute_path)
    return absolute_path


def delfile(src):
    if os.path.isfile(src):
        try:
            os.remove(src)
            print("Clearing the configuration file: %s" % src)
        except Exception as e:
            print(e)
    elif os.path.isdir(src):
        for item in os.listdir(src):
            itemsrc = os.path.join(src, item)
            delfile(itemsrc)
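For instance, validate_crt_subj drops empty or malformed components from a certificate subject string (values invented):

    print(validate_crt_subj("/C=US/ST=/O=GoHarbor/CN=notarysigner/"))
    # /C=US/O=GoHarbor/CN=notarysigner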
52  make/photon/prepare/utils/nginx.py  Normal file
@@ -0,0 +1,52 @@
import os, shutil
from fnmatch import fnmatch

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir, mark_file
from utils.jinja import render_jinja
from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH

nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d")
nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja")
nginx_http_conf_template = os.path.join(templates_dir, "nginx", "nginx.http.conf.jinja")
nginx_template_ext_dir = os.path.join(templates_dir, 'nginx', 'ext')

CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS = 'harbor.https.*.conf'
CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP = 'harbor.http.*.conf'

def prepare_nginx(config_dict):
    prepare_config_dir(nginx_confd_dir)
    render_nginx_template(config_dict)

def render_nginx_template(config_dict):
    if config_dict['protocol'] == "https":
        render_jinja(nginx_https_conf_template, nginx_conf,
                     ssl_cert=SSL_CERT_PATH,
                     ssl_cert_key=SSL_CERT_KEY_PATH)
        location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
    else:
        render_jinja(nginx_http_conf_template, nginx_conf)
        location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
    copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)

def add_additional_location_config(src, dst):
    """
    These conf files are used by users who want to add additional customized locations to the Harbor proxy
    :params src: source of the file
    :params dst: destination file path
    """
    if not os.path.isfile(src):
        return
    print("Copying nginx configuration file {src} to {dst}".format(
        src=src, dst=dst))
    shutil.copy2(src, dst)
    mark_file(dst, mode=0o644)

def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern):
    if not os.path.exists(src_config_dir):
        return
    # Iterate with an explicit loop: map() is lazy in Python 3, so a discarded
    # map object would never actually perform the copies.
    for filename in os.listdir(src_config_dir):
        if fnmatch(filename, filename_pattern):
            add_additional_location_config(
                os.path.join(src_config_dir, filename),
                os.path.join(dst_config_dir, filename))
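A tiny check of the location-file patterns used above (file names invented):

    from fnmatch import fnmatch
    print(fnmatch('harbor.https.metrics.conf', CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS))  # True
    print(fnmatch('harbor.http.metrics.conf', CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS))   # False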
126  make/photon/prepare/utils/notary.py  Normal file
@@ -0,0 +1,126 @@
import os, shutil, pathlib
from g import templates_dir, config_dir, root_crt_path, secret_key_dir, DEFAULT_UID, DEFAULT_GID
from .cert import openssl_installed, create_cert, create_root_cert, get_alias
from .jinja import render_jinja
from .misc import mark_file, prepare_config_dir

notary_template_dir = os.path.join(templates_dir, "notary")
notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja")
notary_server_pg_template = os.path.join(notary_template_dir, "server-config.postgres.json.jinja")
notary_server_nginx_config_template = os.path.join(templates_dir, "nginx", "notary.server.conf.jinja")
notary_signer_env_template = os.path.join(notary_template_dir, "signer_env.jinja")
notary_server_env_template = os.path.join(notary_template_dir, "server_env.jinja")

notary_config_dir = os.path.join(config_dir, 'notary')
notary_signer_pg_config = os.path.join(notary_config_dir, "signer-config.postgres.json")
notary_server_pg_config = os.path.join(notary_config_dir, "server-config.postgres.json")
notary_server_config_path = os.path.join(notary_config_dir, 'notary.server.conf')
notary_signer_env_path = os.path.join(notary_config_dir, "signer_env")
notary_server_env_path = os.path.join(notary_config_dir, "server_env")


def prepare_env_notary(customize_crt, nginx_config_dir):
    notary_config_dir = prepare_config_dir(config_dir, "notary")
    old_signer_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.crt'))
    old_signer_key_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.key'))
    old_signer_ca_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer-ca.crt'))

    notary_secret_dir = prepare_config_dir('/secret/notary')
    signer_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.crt'))
    signer_key_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.key'))
    signer_ca_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer-ca.crt'))

    # In version 1.8 the secret path changed.
    # If the cert, key, and CA all already exist in the new location, don't do anything.
    if not(
        signer_cert_secret_path.exists() and
        signer_key_secret_path.exists() and
        signer_ca_cert_secret_path.exists()
    ):
        # If the certs exist in the old location, copy them to the new one
        if old_signer_ca_cert_secret_path.exists() and old_signer_cert_secret_path.exists() and old_signer_key_secret_path.exists():
            print("Copying certs for notary signer")
            shutil.copy2(old_signer_ca_cert_secret_path, signer_ca_cert_secret_path)
            shutil.copy2(old_signer_key_secret_path, signer_key_secret_path)
            shutil.copy2(old_signer_cert_secret_path, signer_cert_secret_path)
        # If the certs exist in neither location, create them and move them into the new place
        elif openssl_installed():
            try:
                temp_cert_dir = os.path.join('/tmp', "cert_tmp")
                if not os.path.exists(temp_cert_dir):
                    os.makedirs(temp_cert_dir)
                ca_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=Self-signed by GoHarbor"
                cert_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=notarysigner"
                signer_ca_cert = os.path.join(temp_cert_dir, "notary-signer-ca.crt")
                signer_ca_key = os.path.join(temp_cert_dir, "notary-signer-ca.key")
                signer_cert_path = os.path.join(temp_cert_dir, "notary-signer.crt")
                signer_key_path = os.path.join(temp_cert_dir, "notary-signer.key")
                create_root_cert(ca_subj, key_path=signer_ca_key, cert_path=signer_ca_cert)
                create_cert(cert_subj, signer_ca_key, signer_ca_cert, key_path=signer_key_path, cert_path=signer_cert_path)
                print("Copying certs for notary signer")
                shutil.copy2(signer_cert_path, signer_cert_secret_path)
                shutil.copy2(signer_key_path, signer_key_secret_path)
                shutil.copy2(signer_ca_cert, signer_ca_cert_secret_path)
            finally:
                srl_tmp = os.path.join(os.getcwd(), ".srl")
                if os.path.isfile(srl_tmp):
                    os.remove(srl_tmp)
                if os.path.isdir(temp_cert_dir):
                    shutil.rmtree(temp_cert_dir, True)
        else:
            raise Exception("No certs for notary")

    # copy server_env to notary config
    shutil.copy2(
        os.path.join(notary_template_dir, "server_env.jinja"),
        os.path.join(notary_config_dir, "server_env"))

    print("Copying nginx configuration file for notary")
    notary_nginx_upstream_template_conf = os.path.join(templates_dir, "nginx", "notary.upstream.conf.jinja")
    notary_server_nginx_config = os.path.join(nginx_config_dir, "notary.server.conf")
    shutil.copy2(notary_nginx_upstream_template_conf, notary_server_nginx_config)

    mark_file(os.path.join(notary_secret_dir, "notary-signer.crt"))
    mark_file(os.path.join(notary_secret_dir, "notary-signer.key"))
    mark_file(os.path.join(notary_secret_dir, "notary-signer-ca.crt"))

    # print("Copying sql file for notary DB")
    # if os.path.exists(os.path.join(notary_config_dir, "postgresql-initdb.d")):
    #     shutil.rmtree(os.path.join(notary_config_dir, "postgresql-initdb.d"))
    # shutil.copytree(os.path.join(notary_temp_dir, "postgresql-initdb.d"), os.path.join(notary_config_dir, "postgresql-initdb.d"))


def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_path):

    prepare_env_notary(config_dict['customize_crt'], nginx_config_dir)

    render_jinja(
        notary_signer_pg_template,
        notary_signer_pg_config,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID
    )

    render_jinja(
        notary_server_pg_template,
        notary_server_pg_config,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        token_endpoint=config_dict['public_url'])

    render_jinja(
        notary_server_nginx_config_template,
        os.path.join(nginx_config_dir, "notary.server.conf"),
        ssl_cert=ssl_cert_path,
        ssl_cert_key=ssl_cert_key_path)

    default_alias = get_alias(secret_key_dir)
    render_jinja(
        notary_signer_env_template,
        notary_signer_env_path,
        alias=default_alias)

    render_jinja(
        notary_server_env_template,
        notary_server_env_path
    )
0  make/photon/prepare/utils/proxy.py  Normal file
51  make/photon/prepare/utils/registry.py  Normal file
@@ -0,0 +1,51 @@
import os, shutil

from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja


registry_config_dir = os.path.join(config_dir, "registry")
registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
registry_conf = os.path.join(config_dir, "registry", "config.yml")


def prepare_registry(config_dict):
    prepare_registry_config_dir()

    storage_provider_info = get_storage_provider_info(
        config_dict['storage_provider_name'],
        config_dict['storage_provider_config'],
        registry_config_dir)

    render_jinja(
        registry_config_template_path,
        registry_conf,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        storage_provider_info=storage_provider_info,
        **config_dict)

def prepare_registry_config_dir():
    prepare_config_dir(registry_config_dir)

def get_storage_provider_info(provider_name, provider_config, registry_config_dir_path):
    # seed the local config from the caller-supplied provider_config so that
    # non-filesystem providers are handled as well
    storage_provider_config = provider_config
    if provider_name == "filesystem":
        if not storage_provider_config:
            storage_provider_config = "rootdirectory: /storage"
        elif "rootdirectory:" not in storage_provider_config:
            storage_provider_config = "rootdirectory: /storage" + "," + storage_provider_config
    # generate the storage configuration section in yaml format
    storage_provider_conf_list = [provider_name + ':']
    for c in storage_provider_config.split(","):
        kvs = c.split(": ")
        if len(kvs) == 2:
            if kvs[0].strip() == "keyfile":
                srcKeyFile = kvs[1].strip()
                if os.path.isfile(srcKeyFile):
                    shutil.copyfile(srcKeyFile, os.path.join(registry_config_dir_path, "gcs.key"))
                    storage_provider_conf_list.append("keyfile: %s" % "/etc/registry/gcs.key")
                    continue
        storage_provider_conf_list.append(c.strip())
    storage_provider_info = ('\n' + ' ' * 4).join(storage_provider_conf_list)
    return storage_provider_info
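The indentation-joined result slots directly under the registry config's storage key; for the filesystem default (hypothetical call):

    print(get_storage_provider_info('filesystem', '', '/tmp'))
    # filesystem:
    #     rootdirectory: /storage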
30  make/photon/prepare/utils/registry_ctl.py  Normal file
@@ -0,0 +1,30 @@
import os, shutil

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

registryctl_config_dir = os.path.join(config_dir, "registryctl")
registryctl_config_template_path = os.path.join(templates_dir, "registryctl", "config.yml.jinja")
registryctl_conf = os.path.join(config_dir, "registryctl", "config.yml")
registryctl_env_template_path = os.path.join(templates_dir, "registryctl", "env.jinja")
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")

def prepare_registry_ctl(config_dict):
    # prepare dir
    prepare_registry_ctl_config_dir()

    # Render Registryctl
    render_jinja(
        registryctl_env_template_path,
        registryctl_conf_env,
        **config_dict)

    # Copy Registryctl config
    copy_registry_ctl_conf(registryctl_config_template_path, registryctl_conf)

def prepare_registry_ctl_config_dir():
    prepare_config_dir(registryctl_config_dir)

def copy_registry_ctl_conf(src, dst):
    shutil.copyfile(src, dst)
11  make/photon/prepare/utils/uaa.py  Normal file
@@ -0,0 +1,11 @@
import os, shutil

def prepare_uaa_cert_file(uaa_ca_cert, core_cert_dir):
    if os.path.isfile(uaa_ca_cert):
        if not os.path.isdir(core_cert_dir):
            os.makedirs(core_cert_dir)
        core_uaa_ca = os.path.join(core_cert_dir, "uaa_ca.pem")
        print("Copying UAA CA cert to %s" % core_uaa_ca)
        shutil.copyfile(uaa_ca_cert, core_uaa_ca)
    else:
        print("Can not find UAA CA cert: %s, skip" % uaa_ca_cert)
801  make/prepare
@@ -1,750 +1,51 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import print_function, unicode_literals # We require Python 2.6 or later
|
||||
from string import Template
|
||||
import random
|
||||
import os
|
||||
from fnmatch import fnmatch
|
||||
import sys
|
||||
import string
|
||||
import argparse
|
||||
import subprocess
|
||||
import shutil
|
||||
from io import open
|
||||
|
||||
if sys.version_info[:3][0] == 2:
|
||||
import ConfigParser as ConfigParser
|
||||
import StringIO as StringIO
|
||||
|
||||
if sys.version_info[:3][0] == 3:
|
||||
import configparser as ConfigParser
|
||||
import io as StringIO
|
||||
|
||||
DATA_VOL = "/data"
|
||||
DEFAULT_UID = 10000
|
||||
DEFAULT_GID = 10000
|
||||
|
||||
base_dir = os.path.dirname(__file__)
|
||||
config_dir = os.path.join(base_dir, "common/config")
|
||||
templates_dir = os.path.join(base_dir, "common/templates")
|
||||
|
||||
custom_nginx_location_file_pattern = 'harbor.https.*.conf'
|
||||
|
||||
def validate(conf, args):
|
||||
|
||||
protocol = rcp.get("configuration", "ui_url_protocol")
|
||||
if protocol != "https" and args.notary_mode:
|
||||
raise Exception("Error: the protocol must be https when Harbor is deployed with Notary")
|
||||
if protocol == "https":
|
||||
if not rcp.has_option("configuration", "ssl_cert"):
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert is not set")
|
||||
cert_path = rcp.get("configuration", "ssl_cert")
|
||||
if not os.path.isfile(cert_path):
|
||||
raise Exception("Error: The path for certificate: %s is invalid" % cert_path)
|
||||
if not rcp.has_option("configuration", "ssl_cert_key"):
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
|
||||
cert_key_path = rcp.get("configuration", "ssl_cert_key")
|
||||
if not os.path.isfile(cert_key_path):
|
||||
raise Exception("Error: The path for certificate key: %s is invalid" % cert_key_path)
|
||||
project_creation = rcp.get("configuration", "project_creation_restriction")
|
||||
|
||||
if project_creation != "everyone" and project_creation != "adminonly":
|
||||
raise Exception("Error invalid value for project_creation_restriction: %s" % project_creation)
|
||||
|
||||
valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
|
||||
storage_provider_name = rcp.get("configuration", "registry_storage_provider_name").strip()
|
||||
if storage_provider_name not in valid_storage_drivers:
|
||||
raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (storage_provider_name, ",".join(valid_storage_drivers)))
|
||||
|
||||
storage_provider_config = rcp.get("configuration", "registry_storage_provider_config").strip()
|
||||
if storage_provider_name != "filesystem":
|
||||
if storage_provider_config == "":
|
||||
raise Exception("Error: no provider configurations are provided for provider %s" % storage_provider_name)
|
||||
|
||||
redis_host = rcp.get("configuration", "redis_host")
|
||||
if redis_host is None or len(redis_host) < 1:
|
||||
raise Exception("Error: redis_host in harbor.cfg needs to point to an endpoint of Redis server or cluster.")
|
||||
|
||||
redis_port = rcp.get("configuration", "redis_port")
|
||||
if len(redis_port) < 1:
|
||||
raise Exception("Error: redis_port in harbor.cfg needs to point to the port of Redis server or cluster.")
|
||||
|
||||
redis_db_index = rcp.get("configuration", "redis_db_index").strip()
|
||||
if len(redis_db_index.split(",")) != 3:
|
||||
raise Exception("Error invalid value for redis_db_index: %s. please set it as 1,2,3" % redis_db_index)
|
||||
|
||||
#To meet security requirement
|
||||
#By default it will change file mode to 0600, and make the owner of the file to 10000:10000
|
||||
def mark_file(path, mode=0o600, uid=DEFAULT_UID, gid=DEFAULT_GID):
|
||||
if mode > 0:
|
||||
os.chmod(path, mode)
|
||||
if uid > 0 and gid > 0:
|
||||
os.chown(path, uid, gid)
|
||||
|
||||
def get_secret_key(path):
|
||||
secret_key = _get_secret(path, "secretkey")
|
||||
if len(secret_key) != 16:
|
||||
raise Exception("secret key's length has to be 16 chars, current length: %d" % len(secret_key))
|
||||
return secret_key
|
||||
|
||||
def get_alias(path):
|
||||
alias = _get_secret(path, "defaultalias", length=8)
|
||||
return alias
|
||||
|
||||
def _get_secret(folder, filename, length=16):
|
||||
key_file = os.path.join(folder, filename)
|
||||
if os.path.isfile(key_file):
|
||||
with open(key_file, 'r') as f:
|
||||
key = f.read()
|
||||
print("loaded secret from file: %s" % key_file)
|
||||
mark_file(key_file)
|
||||
return key
|
||||
if not os.path.isdir(folder):
|
||||
os.makedirs(folder)
|
||||
key = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(length))
|
||||
with open(key_file, 'w') as f:
|
||||
f.write(key)
|
||||
print("Generated and saved secret to file: %s" % key_file)
|
||||
mark_file(key_file)
|
||||
return key
|
||||
|
||||
def prep_conf_dir(root, *name):
|
||||
absolute_path = os.path.join(root, *name)
|
||||
if not os.path.exists(absolute_path):
|
||||
os.makedirs(absolute_path)
|
||||
return absolute_path
|
||||
|
||||
def render(src, dest, mode=0o640, uid=0, gid=0, **kw):
|
||||
t = Template(open(src, 'r').read())
|
||||
with open(dest, 'w') as f:
|
||||
f.write(t.substitute(**kw))
|
||||
mark_file(dest, mode, uid, gid)
|
||||
print("Generated configuration file: %s" % dest)
|
||||
|
||||
def delfile(src):
|
||||
if os.path.isfile(src):
|
||||
try:
|
||||
os.remove(src)
|
||||
print("Clearing the configuration file: %s" % src)
|
||||
except:
|
||||
pass
|
||||
elif os.path.isdir(src):
|
||||
for item in os.listdir(src):
|
||||
itemsrc=os.path.join(src,item)
|
||||
delfile(itemsrc)
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--conf', dest='cfgfile', default=base_dir+'/harbor.cfg',type=str,help="the path of Harbor configuration file")
|
||||
parser.add_argument('--with-notary', dest='notary_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with notary")
|
||||
parser.add_argument('--with-clair', dest='clair_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with clair")
|
||||
parser.add_argument('--with-chartmuseum', dest='chart_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with chart repository supporting")
|
||||
args = parser.parse_args()
|
||||
|
||||
delfile(config_dir)
|
||||
#Read configurations
|
||||
conf = StringIO.StringIO()
|
||||
conf.write("[configuration]\n")
|
||||
conf.write(open(args.cfgfile).read())
|
||||
conf.seek(0, os.SEEK_SET)
|
||||
rcp = ConfigParser.RawConfigParser()
|
||||
rcp.readfp(conf)
|
||||
validate(rcp, args)
|
||||
|
||||
reload_config = rcp.get("configuration", "reload_config") if rcp.has_option(
|
||||
"configuration", "reload_config") else "false"
|
||||
hostname = rcp.get("configuration", "hostname")
|
||||
protocol = rcp.get("configuration", "ui_url_protocol")
|
||||
public_url = protocol + "://" + hostname
|
||||
email_identity = rcp.get("configuration", "email_identity")
|
||||
email_host = rcp.get("configuration", "email_server")
|
||||
email_port = rcp.get("configuration", "email_server_port")
|
||||
email_usr = rcp.get("configuration", "email_username")
|
||||
email_pwd = rcp.get("configuration", "email_password")
|
||||
email_from = rcp.get("configuration", "email_from")
|
||||
email_ssl = rcp.get("configuration", "email_ssl")
|
||||
email_insecure = rcp.get("configuration", "email_insecure")
|
||||
harbor_admin_password = rcp.get("configuration", "harbor_admin_password")
|
||||
auth_mode = rcp.get("configuration", "auth_mode")
|
||||
ldap_url = rcp.get("configuration", "ldap_url")
|
||||
# this two options are either both set or unset
|
||||
if rcp.has_option("configuration", "ldap_searchdn"):
|
||||
ldap_searchdn = rcp.get("configuration", "ldap_searchdn")
|
||||
ldap_search_pwd = rcp.get("configuration", "ldap_search_pwd")
|
||||
else:
|
||||
ldap_searchdn = ""
|
||||
ldap_search_pwd = ""
|
||||
ldap_basedn = rcp.get("configuration", "ldap_basedn")
|
||||
# ldap_filter is null by default
|
||||
if rcp.has_option("configuration", "ldap_filter"):
|
||||
ldap_filter = rcp.get("configuration", "ldap_filter")
|
||||
else:
|
||||
ldap_filter = ""
|
||||
ldap_uid = rcp.get("configuration", "ldap_uid")
|
||||
ldap_scope = rcp.get("configuration", "ldap_scope")
|
||||
ldap_timeout = rcp.get("configuration", "ldap_timeout")
|
||||
ldap_verify_cert = rcp.get("configuration", "ldap_verify_cert")
|
||||
ldap_group_basedn = rcp.get("configuration", "ldap_group_basedn")
|
||||
ldap_group_filter = rcp.get("configuration", "ldap_group_filter")
|
||||
ldap_group_gid = rcp.get("configuration", "ldap_group_gid")
|
||||
ldap_group_scope = rcp.get("configuration", "ldap_group_scope")
|
||||
db_password = rcp.get("configuration", "db_password")
|
||||
db_host = rcp.get("configuration", "db_host")
|
||||
db_user = rcp.get("configuration", "db_user")
|
||||
db_port = rcp.get("configuration", "db_port")
|
||||
self_registration = rcp.get("configuration", "self_registration")
|
||||
if protocol == "https":
|
||||
cert_path = rcp.get("configuration", "ssl_cert")
|
||||
cert_key_path = rcp.get("configuration", "ssl_cert_key")
|
||||
customize_crt = rcp.get("configuration", "customize_crt")
|
||||
max_job_workers = rcp.get("configuration", "max_job_workers")
|
||||
token_expiration = rcp.get("configuration", "token_expiration")
|
||||
proj_cre_restriction = rcp.get("configuration", "project_creation_restriction")
|
||||
secretkey_path = rcp.get("configuration", "secretkey_path")
|
||||
if rcp.has_option("configuration", "admiral_url"):
|
||||
admiral_url = rcp.get("configuration", "admiral_url")
|
||||
else:
|
||||
admiral_url = ""
|
||||
clair_db_password = rcp.get("configuration", "clair_db_password")
|
||||
clair_db_host = rcp.get("configuration", "clair_db_host")
|
||||
clair_db_port = rcp.get("configuration", "clair_db_port")
|
||||
clair_db_username = rcp.get("configuration", "clair_db_username")
|
||||
clair_db = rcp.get("configuration", "clair_db")
|
||||
clair_updaters_interval = rcp.get("configuration", "clair_updaters_interval")
|
||||
|
||||
uaa_endpoint = rcp.get("configuration", "uaa_endpoint")
|
||||
uaa_clientid = rcp.get("configuration", "uaa_clientid")
|
||||
uaa_clientsecret = rcp.get("configuration", "uaa_clientsecret")
|
||||
uaa_verify_cert = rcp.get("configuration", "uaa_verify_cert")
|
||||
uaa_ca_cert = rcp.get("configuration", "uaa_ca_cert")
|
||||
|
||||
secret_key = get_secret_key(secretkey_path)
|
||||
log_rotate_count = rcp.get("configuration", "log_rotate_count")
|
||||
log_rotate_size = rcp.get("configuration", "log_rotate_size")
|
||||
|
||||
redis_host = rcp.get("configuration", "redis_host")
|
||||
redis_port = rcp.get("configuration", "redis_port")
|
||||
redis_password = rcp.get("configuration", "redis_password")
|
||||
redis_db_index = rcp.get("configuration", "redis_db_index")
|
||||
|
||||
db_indexs = redis_db_index.split(',')
|
||||
redis_db_index_reg = db_indexs[0]
|
||||
redis_db_index_js = db_indexs[1]
|
||||
redis_db_index_chart = db_indexs[2]
|
||||
|
||||
#redis://[arbitrary_username:password@]ipaddress:port/database_index
|
||||
redis_url_js = ''
|
||||
redis_url_reg = ''
|
||||
if len(redis_password) > 0:
|
||||
redis_url_js = "redis://anonymous:%s@%s:%s/%s" % (redis_password, redis_host, redis_port, redis_db_index_js)
|
||||
redis_url_reg = "redis://anonymous:%s@%s:%s/%s" % (redis_password, redis_host, redis_port, redis_db_index_reg)
|
||||
else:
|
||||
redis_url_js = "redis://%s:%s/%s" % (redis_host, redis_port, redis_db_index_js)
|
||||
redis_url_reg = "redis://%s:%s/%s" % (redis_host, redis_port, redis_db_index_reg)
|
||||
|
||||
if rcp.has_option("configuration", "skip_reload_env_pattern"):
|
||||
skip_reload_env_pattern = rcp.get("configuration", "skip_reload_env_pattern")
|
||||
else:
|
||||
skip_reload_env_pattern = "$^"
|
||||
storage_provider_name = rcp.get("configuration", "registry_storage_provider_name").strip()
|
||||
storage_provider_config = rcp.get("configuration", "registry_storage_provider_config").strip()
|
||||
# yaml requires 1 or more spaces between the key and value
|
||||
storage_provider_config = storage_provider_config.replace(":", ": ", 1)
|
||||
registry_custom_ca_bundle_path = rcp.get("configuration", "registry_custom_ca_bundle").strip()
|
||||
core_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
jobservice_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
|
||||
core_config_dir = prep_conf_dir(config_dir,"core")
|
||||
core_certificates_dir = prep_conf_dir(core_config_dir,"certificates")
|
||||
db_config_dir = prep_conf_dir(config_dir, "db")
|
||||
job_config_dir = prep_conf_dir(config_dir, "jobservice")
|
||||
registry_config_dir = prep_conf_dir(config_dir, "registry")
|
||||
registryctl_config_dir = prep_conf_dir(config_dir, "registryctl")
|
||||
nginx_config_dir = prep_conf_dir (config_dir, "nginx")
|
||||
nginx_conf_d = prep_conf_dir(nginx_config_dir, "conf.d")
|
||||
log_config_dir = prep_conf_dir (config_dir, "log")
|
||||
|
||||
conf_env = os.path.join(config_dir, "core", "config_env")
|
||||
core_conf_env = os.path.join(config_dir, "core", "env")
|
||||
core_conf = os.path.join(config_dir, "core", "app.conf")
|
||||
core_cert_dir = os.path.join(config_dir, "core", "certificates")
|
||||
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
|
||||
registry_conf = os.path.join(config_dir, "registry", "config.yml")
|
||||
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")
|
||||
registryctl_conf_yml = os.path.join(config_dir, "registryctl", "config.yml")
|
||||
db_conf_env = os.path.join(config_dir, "db", "env")
|
||||
job_conf_env = os.path.join(config_dir, "jobservice", "env")
|
||||
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
|
||||
cert_dir = os.path.join(config_dir, "nginx", "cert")
|
||||
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
|
||||
registry_url = "http://registry:5000"
|
||||
registry_controller_url = "http://registryctl:8080"
|
||||
core_url = "http://core:8080"
|
||||
token_service_url = "http://core:8080/service/token"
|
||||
|
||||
jobservice_url = "http://jobservice:8080"
|
||||
clair_url = "http://clair:6060"
|
||||
notary_url = "http://notary-server:4443"
|
||||
chart_repository_url = "http://chartmuseum:9999"
|
||||
|
||||
if len(admiral_url) != 0 and admiral_url != "NA":
|
||||
#VIC overwrites the data volume path, which by default should be same as the value of secretkey_path
|
||||
DATA_VOL = secretkey_path
|
||||
JOB_LOG_DIR = os.path.join(DATA_VOL, "job_logs")
|
||||
if not os.path.exists(JOB_LOG_DIR):
|
||||
os.makedirs(JOB_LOG_DIR)
|
||||
mark_file(JOB_LOG_DIR, mode=0o755)
|
||||
|
||||
if protocol == "https":
|
||||
target_cert_path = os.path.join(cert_dir, os.path.basename(cert_path))
|
||||
if not os.path.exists(cert_dir):
|
||||
os.makedirs(cert_dir)
|
||||
shutil.copy2(cert_path,target_cert_path)
|
||||
target_cert_key_path = os.path.join(cert_dir, os.path.basename(cert_key_path))
|
||||
shutil.copy2(cert_key_path,target_cert_key_path)
|
||||
render(os.path.join(templates_dir, "nginx", "nginx.https.conf"),
|
||||
nginx_conf,
|
||||
ssl_cert = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_path)),
|
||||
ssl_cert_key = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_key_path)))
|
||||
else:
|
||||
render(os.path.join(templates_dir, "nginx", "nginx.http.conf"), nginx_conf)
|
||||
custom_nginx_location_file_pattern = 'harbor.http.*.conf'
|
||||
|
||||
def add_additional_location_config(src, dst):
|
||||
"""
|
||||
This conf file is used for user that wanna add additional customized locations to harbor proxy
|
||||
:params src: source of the file
|
||||
:params dst: destination file path
|
||||
"""
|
||||
if not os.path.isfile(src):
|
||||
return
|
||||
print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
|
||||
shutil.copy2(src, dst)
|
||||
mark_file(dst)
|
||||
|
||||
nginx_template_ext_dir = os.path.join(templates_dir, 'nginx', 'ext')
|
||||
if os.path.exists(nginx_template_ext_dir):
|
||||
map(lambda filename: add_additional_location_config(
|
||||
os.path.join(nginx_template_ext_dir, filename),
|
||||
os.path.join(nginx_conf_d, filename)),
|
||||
[fname for fname in os.listdir(nginx_template_ext_dir) if fnmatch(fname, custom_nginx_location_file_pattern)])
|
||||
|
||||

# Use reload_key to avoid reloading config after Harbor restarts
reload_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) if reload_config == "true" else ""

ldap_group_admin_dn = rcp.get("configuration", "ldap_group_admin_dn") if rcp.has_option("configuration", "ldap_group_admin_dn") else ""

render(os.path.join(templates_dir, "core", "config_env"),
       conf_env,
       reload_config=reload_config,
       public_url=public_url,
       core_url=core_url,
       auth_mode=auth_mode,
       self_registration=self_registration,
       ldap_url=ldap_url,
       ldap_searchdn=ldap_searchdn,
       ldap_search_pwd=ldap_search_pwd,
       ldap_basedn=ldap_basedn,
       ldap_filter=ldap_filter,
       ldap_uid=ldap_uid,
       ldap_scope=ldap_scope,
       ldap_verify_cert=ldap_verify_cert,
       ldap_timeout=ldap_timeout,
       ldap_group_basedn=ldap_group_basedn,
       ldap_group_filter=ldap_group_filter,
       ldap_group_gid=ldap_group_gid,
       ldap_group_scope=ldap_group_scope,
       ldap_group_admin_dn=ldap_group_admin_dn,
       db_password=db_password,
       db_host=db_host,
       db_user=db_user,
       db_port=db_port,
       email_host=email_host,
       email_port=email_port,
       email_usr=email_usr,
       email_pwd=email_pwd,
       email_ssl=email_ssl,
       email_insecure=email_insecure,
       email_from=email_from,
       email_identity=email_identity,
       harbor_admin_password=harbor_admin_password,
       project_creation_restriction=proj_cre_restriction,
       max_job_workers=max_job_workers,
       core_secret=core_secret,
       jobservice_secret=jobservice_secret,
       token_expiration=token_expiration,
       admiral_url=admiral_url,
       with_notary=args.notary_mode,
       with_clair=args.clair_mode,
       clair_db_password=clair_db_password,
       clair_db_host=clair_db_host,
       clair_db_port=clair_db_port,
       clair_db_username=clair_db_username,
       clair_db=clair_db,
       uaa_endpoint=uaa_endpoint,
       uaa_clientid=uaa_clientid,
       uaa_clientsecret=uaa_clientsecret,
       uaa_verify_cert=uaa_verify_cert,
       storage_provider_name=storage_provider_name,
       registry_url=registry_url,
       token_service_url=token_service_url,
       jobservice_url=jobservice_url,
       clair_url=clair_url,
       notary_url=notary_url,
       reload_key=reload_key,
       skip_reload_env_pattern=skip_reload_env_pattern,
       chart_repository_url=chart_repository_url,
       registry_controller_url=registry_controller_url,
       with_chartmuseum=args.chart_mode
       )

# set cache for chart repo server
# default to 'memory' mode; if redis is configured, switch to 'redis'
chart_cache_driver = "memory"
if len(redis_host) > 0:
    chart_cache_driver = "redis"

render(os.path.join(templates_dir, "core", "env"),
       core_conf_env,
       core_secret=core_secret,
       jobservice_secret=jobservice_secret,
       redis_host=redis_host,
       redis_port=redis_port,
       redis_password=redis_password,
       chart_cache_driver=chart_cache_driver,
       redis_url_reg=redis_url_reg)

registry_config_file = "config.yml"
if storage_provider_name == "filesystem":
    if not storage_provider_config:
        storage_provider_config = "rootdirectory: /storage"
    elif "rootdirectory:" not in storage_provider_config:
        storage_provider_config = "rootdirectory: /storage" + "," + storage_provider_config
# generate storage configuration section in yaml format
storage_provider_conf_list = [storage_provider_name + ':']
for c in storage_provider_config.split(","):
    kvs = c.split(": ")
    if len(kvs) == 2:
        if kvs[0].strip() == "keyfile":
            srcKeyFile = kvs[1].strip()
            if os.path.isfile(srcKeyFile):
                shutil.copyfile(srcKeyFile, os.path.join(registry_config_dir, "gcs.key"))
                storage_provider_conf_list.append("keyfile: %s" % "/etc/registry/gcs.key")
                continue
    storage_provider_conf_list.append(c.strip())
storage_provider_info = ('\n' + ' ' * 4).join(storage_provider_conf_list)
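# Editor's sketch (hypothetical values, not part of the original script):
# how the join above turns a comma-separated provider config into a YAML
# snippet ready to splice into the registry's config.yml.
def _demo_storage_yaml():
    name = "s3"
    cfg = "region: us-east-1, bucket: harbor-registry"
    parts = [name + ':'] + [c.strip() for c in cfg.split(",")]
    return ('\n' + ' ' * 4).join(parts)
# _demo_storage_yaml() returns:
# s3:
#     region: us-east-1
#     bucket: harbor-registry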
render(os.path.join(templates_dir, "registry", registry_config_file),
|
||||
registry_conf,
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID,
|
||||
storage_provider_info=storage_provider_info,
|
||||
public_url=public_url,
|
||||
core_url=core_url,
|
||||
redis_host=redis_host,
|
||||
redis_port=redis_port,
|
||||
redis_password=redis_password,
|
||||
redis_db_index_reg=redis_db_index_reg)
|
||||
|
||||
render(os.path.join(templates_dir, "db", "env"),
|
||||
db_conf_env,
|
||||
db_password=db_password)
|
||||
|
||||
render(os.path.join(templates_dir, "jobservice", "env"),
|
||||
job_conf_env,
|
||||
core_secret=core_secret,
|
||||
jobservice_secret=jobservice_secret,
|
||||
core_url=core_url)
|
||||
|
||||
render(os.path.join(templates_dir, "jobservice", "config.yml"),
|
||||
jobservice_conf,
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID,
|
||||
max_job_workers=max_job_workers,
|
||||
redis_url=redis_url_js)
|
||||
|
||||
render(os.path.join(templates_dir, "log", "logrotate.conf"),
|
||||
log_rotate_config,
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID,
|
||||
log_rotate_count=log_rotate_count,
|
||||
log_rotate_size=log_rotate_size)
|
||||
|
||||
render(os.path.join(templates_dir, "registryctl", "env"),
|
||||
registryctl_conf_env,
|
||||
jobservice_secret=jobservice_secret,
|
||||
core_secret=core_secret)
|
||||
|
||||
shutil.copyfile(os.path.join(templates_dir, "core", "app.conf"), core_conf)
|
||||
shutil.copyfile(os.path.join(templates_dir, "registryctl", "config.yml"), registryctl_conf_yml)
|
||||
print("Generated configuration file: %s" % core_conf)
|
||||
|
||||
if auth_mode == "uaa_auth":
|
||||
if os.path.isfile(uaa_ca_cert):
|
||||
if not os.path.isdir(core_cert_dir):
|
||||
os.makedirs(core_cert_dir)
|
||||
core_uaa_ca = os.path.join(core_cert_dir, "uaa_ca.pem")
|
||||
print("Copying UAA CA cert to %s" % core_uaa_ca)
|
||||
shutil.copyfile(uaa_ca_cert, core_uaa_ca)
|
||||
else:
|
||||
print("Can not find UAA CA cert: %s, skip" % uaa_ca_cert)
|
||||
|
||||
|
||||

def validate_crt_subj(dirty_subj):
    subj_list = [item for item in dirty_subj.strip().split("/")
                 if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0]
    return "/" + "/".join(subj_list)

FNULL = open(os.devnull, 'w')

from functools import wraps

def stat_decorator(func):
    @wraps(func)
    def check_wrapper(*args, **kw):
        stat = func(*args, **kw)
        message = "Generated certificate, key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path']) \
            if stat == 0 else "Failed to generate key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path'])
        print(message)
        if stat != 0:
            sys.exit(1)
    return check_wrapper

@stat_decorator
def create_root_cert(subj, key_path="./k.key", cert_path="./cert.crt"):
    rc = subprocess.call(["openssl", "genrsa", "-out", key_path, "4096"], stdout=FNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["openssl", "req", "-new", "-x509", "-key", key_path,
                            "-out", cert_path, "-days", "3650", "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)

@stat_decorator
def create_cert(subj, ca_key, ca_cert, key_path="./k.key", cert_path="./cert.crt"):
    cert_dir = os.path.dirname(cert_path)
    csr_path = os.path.join(cert_dir, "tmp.csr")
    rc = subprocess.call(["openssl", "req", "-newkey", "rsa:4096", "-nodes", "-sha256", "-keyout", key_path,
                          "-out", csr_path, "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["openssl", "x509", "-req", "-days", "3650", "-in", csr_path, "-CA",
                            ca_cert, "-CAkey", ca_key, "-CAcreateserial", "-out", cert_path], stdout=FNULL, stderr=subprocess.STDOUT)

def openssl_installed():
    # use subprocess.call here: check_call would raise instead of returning a status
    shell_stat = subprocess.call(["which", "openssl"], stdout=FNULL, stderr=subprocess.STDOUT)
    if shell_stat != 0:
        print("Cannot find openssl on this host\nUse default SSL certificate file")
        return False
    return True

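# Editor's note (sketch, not part of the original script): stat_decorator above
# reads kw['key_path'] and kw['cert_path'], so the decorated functions must be
# called with those as keyword arguments, as every call site below does, e.g.:
#   create_root_cert("/C=US/O=Demo", key_path="/tmp/demo.key", cert_path="/tmp/demo.crt")
# A positional call would raise KeyError inside check_wrapper when it builds
# the status message.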

if customize_crt == 'on' and openssl_installed():
    empty_subj = "/"
    private_key_pem = os.path.join(config_dir, "core", "private_key.pem")
    root_crt = os.path.join(config_dir, "registry", "root.crt")
    create_root_cert(empty_subj, key_path=private_key_pem, cert_path=root_crt)
    mark_file(private_key_pem)
    mark_file(root_crt)
else:
    print("Copied configuration file: %s" % os.path.join(core_config_dir, "private_key.pem"))
    shutil.copyfile(os.path.join(templates_dir, "core", "private_key.pem"), os.path.join(core_config_dir, "private_key.pem"))
    print("Copied configuration file: %s" % os.path.join(registry_config_dir, "root.crt"))
    shutil.copyfile(os.path.join(templates_dir, "registry", "root.crt"), os.path.join(registry_config_dir, "root.crt"))

if len(registry_custom_ca_bundle_path) > 0 and os.path.isfile(registry_custom_ca_bundle_path):
    shutil.copyfile(registry_custom_ca_bundle_path, os.path.join(config_dir, "custom-ca-bundle.crt"))
    print("Copied custom ca bundle: %s" % os.path.join(config_dir, "custom-ca-bundle.crt"))

if args.notary_mode:
    notary_config_dir = prep_conf_dir(config_dir, "notary")
    notary_temp_dir = os.path.join(templates_dir, "notary")
    print("Copying sql file for notary DB")
    # if os.path.exists(os.path.join(notary_config_dir, "postgresql-initdb.d")):
    #     shutil.rmtree(os.path.join(notary_config_dir, "postgresql-initdb.d"))
    # shutil.copytree(os.path.join(notary_temp_dir, "postgresql-initdb.d"), os.path.join(notary_config_dir, "postgresql-initdb.d"))
    if customize_crt == 'on' and openssl_installed():
        try:
            temp_cert_dir = os.path.join(base_dir, "cert_tmp")
            if not os.path.exists(temp_cert_dir):
                os.makedirs(temp_cert_dir)
            ca_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=Self-signed by GoHarbor"
            cert_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=notarysigner"
            signer_ca_cert = os.path.join(temp_cert_dir, "notary-signer-ca.crt")
            signer_ca_key = os.path.join(temp_cert_dir, "notary-signer-ca.key")
            signer_cert_path = os.path.join(temp_cert_dir, "notary-signer.crt")
            signer_key_path = os.path.join(temp_cert_dir, "notary-signer.key")
            create_root_cert(ca_subj, key_path=signer_ca_key, cert_path=signer_ca_cert)
            create_cert(cert_subj, signer_ca_key, signer_ca_cert, key_path=signer_key_path, cert_path=signer_cert_path)
            print("Copying certs for notary signer")
            shutil.copy2(signer_cert_path, notary_config_dir)
            shutil.copy2(signer_key_path, notary_config_dir)
            shutil.copy2(signer_ca_cert, notary_config_dir)
        finally:
            srl_tmp = os.path.join(os.getcwd(), ".srl")
            if os.path.isfile(srl_tmp):
                os.remove(srl_tmp)
            if os.path.isdir(temp_cert_dir):
                shutil.rmtree(temp_cert_dir, True)
    else:
        print("Copying certs for notary signer")
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer.crt"), notary_config_dir)
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer.key"), notary_config_dir)
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer-ca.crt"), notary_config_dir)
    shutil.copy2(os.path.join(registry_config_dir, "root.crt"), notary_config_dir)
    mark_file(os.path.join(notary_config_dir, "notary-signer.crt"))
    mark_file(os.path.join(notary_config_dir, "notary-signer.key"))
    mark_file(os.path.join(notary_config_dir, "notary-signer-ca.crt"))
    mark_file(os.path.join(notary_config_dir, "root.crt"))
    print("Copying notary signer configuration file")
    render(os.path.join(notary_temp_dir, "signer-config.postgres.json"),
           os.path.join(notary_config_dir, "signer-config.postgres.json"),
           uid=DEFAULT_UID,
           gid=DEFAULT_GID
           )

    render(os.path.join(notary_temp_dir, "server-config.postgres.json"),
           os.path.join(notary_config_dir, "server-config.postgres.json"),
           uid=DEFAULT_UID,
           gid=DEFAULT_GID,
           token_endpoint=public_url)
    print("Copying nginx configuration file for notary")
    shutil.copy2(os.path.join(templates_dir, "nginx", "notary.upstream.conf"), nginx_conf_d)
    render(os.path.join(templates_dir, "nginx", "notary.server.conf"),
           os.path.join(nginx_conf_d, "notary.server.conf"),
           ssl_cert=os.path.join("/etc/nginx/cert", os.path.basename(target_cert_path)),
           ssl_cert_key=os.path.join("/etc/nginx/cert", os.path.basename(target_cert_key_path)))

    default_alias = get_alias(secretkey_path)
    render(os.path.join(notary_temp_dir, "signer_env"), os.path.join(notary_config_dir, "signer_env"), alias=default_alias)
    shutil.copy2(os.path.join(notary_temp_dir, "server_env"), notary_config_dir)

if args.clair_mode:
    clair_temp_dir = os.path.join(templates_dir, "clair")
    clair_config_dir = prep_conf_dir(config_dir, "clair")
    if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
        print("Copying offline data file for clair DB")
        shutil.rmtree(os.path.join(clair_config_dir, "postgresql-init.d"))
    shutil.copytree(os.path.join(clair_temp_dir, "postgresql-init.d"), os.path.join(clair_config_dir, "postgresql-init.d"))
    postgres_env = os.path.join(clair_config_dir, "postgres_env")
    render(os.path.join(clair_temp_dir, "postgres_env"), postgres_env, password=clair_db_password)
    clair_conf = os.path.join(clair_config_dir, "config.yaml")
    render(os.path.join(clair_temp_dir, "config.yaml"),
           clair_conf,
           uid=DEFAULT_UID,
           gid=DEFAULT_GID,
           password=clair_db_password,
           username=clair_db_username,
           host=clair_db_host,
           port=clair_db_port,
           dbname=clair_db,
           interval=clair_updaters_interval)

    # config http proxy for Clair
    http_proxy = rcp.get("configuration", "http_proxy").strip()
    https_proxy = rcp.get("configuration", "https_proxy").strip()
    no_proxy = rcp.get("configuration", "no_proxy").strip()
    clair_env = os.path.join(clair_config_dir, "clair_env")
    render(os.path.join(clair_temp_dir, "clair_env"), clair_env,
           http_proxy=http_proxy,
           https_proxy=https_proxy,
           no_proxy=no_proxy)

# config chart repository
if args.chart_mode:
    chartm_temp_dir = os.path.join(templates_dir, "chartserver")
    chartm_config_dir = os.path.join(config_dir, "chartserver")
    chartm_env = os.path.join(config_dir, "chartserver", "env")

    if not os.path.isdir(chartm_config_dir):
        print("Creating config folder: %s" % chartm_config_dir)
        os.makedirs(chartm_config_dir)

    # process redis info
    cache_store = "redis"
    cache_redis_password = redis_password
    cache_redis_addr = redis_host + ":" + redis_port
    cache_redis_db_index = redis_db_index_chart

    # process storage info
    # default to the local file system
    storage_driver = "local"
    # storage provider configurations
    # note: the values of the specified keys are not validated here
    # convert the configs to a config map
    storage_provider_configs = storage_provider_config.split(",")
    storage_provider_config_map = {}
    storage_provider_config_options = []

    for k_v in storage_provider_configs:
        if len(k_v) > 0:
            kvs = k_v.split(": ")  # split on ": " to avoid breaking on ":" inside the value
            if len(kvs) == 2:
                # key must not be empty
                if kvs[0].strip() != "":
                    storage_provider_config_map[kvs[0].strip()] = kvs[1].strip()

    if storage_provider_name == "s3":
        # aws s3 storage
        storage_driver = "amazon"
        storage_provider_config_options.append("STORAGE_AMAZON_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_ENDPOINT=%s" % storage_provider_config_map.get("regionendpoint", ""))
        storage_provider_config_options.append("AWS_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskey", ""))
        storage_provider_config_options.append("AWS_SECRET_ACCESS_KEY=%s" % storage_provider_config_map.get("secretkey", ""))
    elif storage_provider_name == "gcs":
        # google cloud storage
        storage_driver = "google"
        storage_provider_config_options.append("STORAGE_GOOGLE_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_GOOGLE_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))

        keyFileOnHost = storage_provider_config_map.get("keyfile", "")
        if os.path.isfile(keyFileOnHost):
            shutil.copyfile(keyFileOnHost, os.path.join(chartm_config_dir, "gcs.key"))
            targetKeyFile = "/etc/chartserver/gcs.key"
            storage_provider_config_options.append("GOOGLE_APPLICATION_CREDENTIALS=%s" % targetKeyFile)
    elif storage_provider_name == "azure":
        # azure storage
        storage_driver = "microsoft"
        storage_provider_config_options.append("STORAGE_MICROSOFT_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCOUNT=%s" % storage_provider_config_map.get("accountname", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCESS_KEY=%s" % storage_provider_config_map.get("accountkey", ""))
        storage_provider_config_options.append("STORAGE_MICROSOFT_PREFIX=/azure/harbor/charts")
    elif storage_provider_name == "swift":
        # openstack swift
        storage_driver = "openstack"
        storage_provider_config_options.append("STORAGE_OPENSTACK_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("OS_AUTH_URL=%s" % storage_provider_config_map.get("authurl", ""))
        storage_provider_config_options.append("OS_USERNAME=%s" % storage_provider_config_map.get("username", ""))
        storage_provider_config_options.append("OS_PASSWORD=%s" % storage_provider_config_map.get("password", ""))
        storage_provider_config_options.append("OS_PROJECT_ID=%s" % storage_provider_config_map.get("tenantid", ""))
        storage_provider_config_options.append("OS_PROJECT_NAME=%s" % storage_provider_config_map.get("tenant", ""))
        storage_provider_config_options.append("OS_DOMAIN_ID=%s" % storage_provider_config_map.get("domainid", ""))
        storage_provider_config_options.append("OS_DOMAIN_NAME=%s" % storage_provider_config_map.get("domain", ""))
    elif storage_provider_name == "oss":
        # aliyun OSS
        storage_driver = "alibaba"
        storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % storage_provider_config_map.get("endpoint", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskeyid", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % storage_provider_config_map.get("accesskeysecret", ""))
    else:
        # use local file system
        storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")

    # generate storage provider configuration
    all_storage_provider_configs = ('\n').join(storage_provider_config_options)

    render(os.path.join(chartm_temp_dir, "env"),
           chartm_env,
           cache_store=cache_store,
           cache_redis_addr=cache_redis_addr,
           cache_redis_password=cache_redis_password,
           cache_redis_db_index=cache_redis_db_index,
           core_secret=core_secret,
           storage_driver=storage_driver,
           all_storage_driver_configs=all_storage_provider_configs)


FNULL.close()
print("The configuration files are ready, please use docker-compose to start the service.")
#!/bin/bash

# If compiling from source, this dir is Harbor's make dir.
# If Harbor was installed via package, this dir is Harbor's root dir.
harbor_prepare_path="$( cd "$(dirname "$0")" ; pwd -P )"

echo "host make path is set to ${harbor_prepare_path}"
data_path=$(grep '^[^#]*data_volume:' ${harbor_prepare_path}/harbor.yml | awk '{print $NF}')
log_path=$(grep '^[^#]*location:' ${harbor_prepare_path}/harbor.yml | awk '{print $NF}')
secretkey_path=$(grep '^[^#]*secretkey_path:' ${harbor_prepare_path}/harbor.yml | awk '{print $NF}')
ssl_cert_path=$(grep '^[^#]*ssl_cert:' ${harbor_prepare_path}/harbor.yml | awk '{print $NF}')
ssl_cert_key_path=$(grep '^[^#]*ssl_cert_key:' ${harbor_prepare_path}/harbor.yml | awk '{print $NF}')
registry_custom_ca_bundle=$(grep '^[^#]*registry_custom_ca_bundle:' ${harbor_prepare_path}/harbor.yml | awk '{print $NF}')

# Create input dirs
mkdir -p ${harbor_prepare_path}/input
input_dir=${harbor_prepare_path}/input
mkdir -p $input_dir/nginx
mkdir -p $input_dir/keys
mkdir -p $input_dir/common

# Copy nginx config files to the input dir
cp $ssl_cert_path $input_dir/nginx/server.crt
cp $ssl_cert_key_path $input_dir/nginx/server.key

# Copy secretkey to the input dir
cp -r $secretkey_path $input_dir/keys

# Copy the CA bundle to the input dir
if [ -f $registry_custom_ca_bundle ]
then
    cp -r $registry_custom_ca_bundle $input_dir/common/custom-ca-bundle.crt
fi

# Copy harbor.yml to the input dir
cp ${harbor_prepare_path}/harbor.yml $input_dir/harbor.yml

# Create the secret dir
secret_dir=${data_path}/secret
config_dir=$harbor_prepare_path/common/config

# Run the prepare script
docker run -it --rm -v $input_dir:/input \
                    -v $harbor_prepare_path:/compose_location \
                    -v $config_dir:/config \
                    -v $secret_dir:/secret \
                    -v $log_path:/var/log/harbor \
                    goharbor/prepare:dev $@

# Clean up the input dir
rm -rf ${harbor_prepare_path}/input
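The grep/awk lookups above are line-oriented and ignore YAML structure. As an illustration only (editor's sketch, not part of this commit; it assumes PyYAML is installed and the flat keys the wrapper greps for), the same extraction in Python would be:

# sketch: read the same keys from harbor.yml that the wrapper greps for
import yaml  # PyYAML, assumed available

with open("harbor.yml") as f:
    cfg = yaml.safe_load(f)

data_path = cfg.get("data_volume")
secretkey_path = cfg.get("secretkey_path")
ssl_cert_path = cfg.get("ssl_cert")
ssl_cert_key_path = cfg.get("ssl_cert_key")
print(data_path, secretkey_path, ssl_cert_path, ssl_cert_key_path)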

@ -318,6 +318,11 @@ func TestListUsers(t *testing.T) {
    if users2[0].Username != username {
        t.Errorf("The username in result list does not match, expected: %s, actual: %s", username, users2[0].Username)
    }

    users3, err := ListUsers(&models.UserQuery{Username: username, Pagination: &models.Pagination{Page: 2, Size: 1}})
    if len(users3) != 0 {
        t.Errorf("Expect no user in list, but the actual length is %d, the list: %+v", len(users3), users3)
    }
}

func TestResetUserPassword(t *testing.T) {

@ -106,10 +106,13 @@ func GetTotalOfUsers(query *models.UserQuery) (int64, error) {

// ListUsers lists all users according to different conditions.
func ListUsers(query *models.UserQuery) ([]models.User, error) {
    qs := userQueryConditions(query)
    if query != nil && query.Pagination != nil {
        offset := (query.Pagination.Page - 1) * query.Pagination.Size
        qs = qs.Offset(offset).Limit(query.Pagination.Size)
    }
    users := []models.User{}
    _, err := userQueryConditions(query).Limit(-1).
        OrderBy("username").
        All(&users)
    _, err := qs.OrderBy("username").All(&users)
    return users, err
}

@ -125,7 +125,7 @@ func GetUnitTestConfig() map[string]interface{} {
        common.WithNotary:       "false",
        common.WithChartMuseum:  "false",
        common.SelfRegistration: "true",
        common.WithClair:        "false",
        common.WithClair:        "true",
        common.TokenServiceURL:  "http://core:8080/service/token",
        common.RegistryURL:      fmt.Sprintf("http://%s:5000", ipAddress),
    }

src/core/api/admin_job.go (new file, 261 lines)
@ -0,0 +1,261 @@
// Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
    "encoding/json"
    "fmt"
    "net/http"
    "strconv"

    "github.com/goharbor/harbor/src/common/dao"
    common_http "github.com/goharbor/harbor/src/common/http"
    common_job "github.com/goharbor/harbor/src/common/job"
    common_models "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/utils/log"
    "github.com/goharbor/harbor/src/core/api/models"
    utils_core "github.com/goharbor/harbor/src/core/utils"
)

// AJAPI manages the CRUD of admin jobs and their schedules; any API that needs to handle manual and cron jobs, such as ScanAll and GC, could reuse it.
type AJAPI struct {
    BaseController
}

// Prepare validates the URL and params, it needs the system admin permission.
func (aj *AJAPI) Prepare() {
    aj.BaseController.Prepare()
}

// updateSchedule updates the schedule of an admin job.
func (aj *AJAPI) updateSchedule(ajr models.AdminJobReq) {
    if ajr.Schedule.Type == models.ScheduleManual {
        aj.HandleInternalServerError(fmt.Sprintf("Failed to update admin job schedule due to wrong schedule type: %s.", ajr.Schedule.Type))
        return
    }

    query := &common_models.AdminJobQuery{
        Name: ajr.Name,
        Kind: common_job.JobKindPeriodic,
    }
    jobs, err := dao.GetAdminJobs(query)
    if err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
    if len(jobs) != 1 {
        aj.HandleInternalServerError("Failed to update the admin job schedule: more than one schedule was found in the system, please ensure only one schedule is left for your job.")
        return
    }

    // stop the scheduled job and remove it.
    if err = utils_core.GetJobServiceClient().PostAction(jobs[0].UUID, common_job.JobActionStop); err != nil {
        if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound {
            aj.HandleInternalServerError(fmt.Sprintf("%v", err))
            return
        }
    }

    if err = dao.DeleteAdminJob(jobs[0].ID); err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }

    // Setting the schedule to None cancels the schedule; no new job is added.
    if ajr.Schedule.Type != models.ScheduleNone {
        aj.submit(&ajr)
    }
}

// get gets an execution of an admin job by ID
func (aj *AJAPI) get(id int64) {
    jobs, err := dao.GetAdminJobs(&common_models.AdminJobQuery{
        ID: id,
    })
    if err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("failed to get admin jobs: %v", err))
        return
    }
    if len(jobs) == 0 {
        aj.HandleNotFound("No admin job found.")
        return
    }

    adminJobRep, err := convertToAdminJobRep(jobs[0])
    if err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("failed to convert admin job response: %v", err))
        return
    }

    aj.Data["json"] = adminJobRep
    aj.ServeJSON()
}

// list lists all executions of an admin job by name
func (aj *AJAPI) list(name string) {
    jobs, err := dao.GetTop10AdminJobsOfName(name)
    if err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("failed to get admin jobs: %v", err))
        return
    }

    AdminJobReps := []*models.AdminJobRep{}
    for _, job := range jobs {
        AdminJobRep, err := convertToAdminJobRep(job)
        if err != nil {
            aj.HandleInternalServerError(fmt.Sprintf("failed to convert admin job response: %v", err))
            return
        }
        AdminJobReps = append(AdminJobReps, &AdminJobRep)
    }

    aj.Data["json"] = AdminJobReps
    aj.ServeJSON()
}

// getSchedule gets the admin job schedule ...
func (aj *AJAPI) getSchedule(name string) {
    adminJobSchedule := models.AdminJobSchedule{}

    jobs, err := dao.GetAdminJobs(&common_models.AdminJobQuery{
        Name: name,
        Kind: common_job.JobKindPeriodic,
    })
    if err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("failed to get admin jobs: %v", err))
        return
    }
    if len(jobs) > 1 {
        aj.HandleInternalServerError("Got more than one scheduled admin job; make sure there is only one.")
        return
    }

    if len(jobs) != 0 {
        adminJobRep, err := convertToAdminJobRep(jobs[0])
        if err != nil {
            aj.HandleInternalServerError(fmt.Sprintf("failed to convert admin job response: %v", err))
            return
        }
        adminJobSchedule.Schedule = adminJobRep.Schedule
    }

    aj.Data["json"] = adminJobSchedule
    aj.ServeJSON()
}

// getLog ...
func (aj *AJAPI) getLog(id int64) {
    job, err := dao.GetAdminJob(id)
    if err != nil {
        log.Errorf("Failed to load job data for job: %d, error: %v", id, err)
        aj.CustomAbort(http.StatusInternalServerError, "Failed to get Job data")
    }
    if job == nil {
        log.Errorf("Failed to get admin job: %d", id)
        aj.CustomAbort(http.StatusNotFound, "Failed to get Job")
    }

    logBytes, err := utils_core.GetJobServiceClient().GetJobLog(job.UUID)
    if err != nil {
        if httpErr, ok := err.(*common_http.Error); ok {
            aj.RenderError(httpErr.Code, "")
            log.Errorf(fmt.Sprintf("failed to get log of job %d: %d %s",
                id, httpErr.Code, httpErr.Message))
            return
        }
        aj.HandleInternalServerError(fmt.Sprintf("Failed to get job logs, uuid: %s, error: %v", job.UUID, err))
        return
    }
    aj.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(len(logBytes)))
    aj.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain")
    _, err = aj.Ctx.ResponseWriter.Write(logBytes)
    if err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("Failed to write job logs, uuid: %s, error: %v", job.UUID, err))
    }
}

// submit submits a job to the job service per request
func (aj *AJAPI) submit(ajr *models.AdminJobReq) {
    // cannot post multiple schedules for an admin job.
    if ajr.IsPeriodic() {
        jobs, err := dao.GetAdminJobs(&common_models.AdminJobQuery{
            Name: ajr.Name,
            Kind: common_job.JobKindPeriodic,
        })
        if err != nil {
            aj.HandleInternalServerError(fmt.Sprintf("failed to get admin jobs: %v", err))
            return
        }
        if len(jobs) != 0 {
            aj.HandleStatusPreconditionFailed("Failed to set the schedule for the admin job: one already exists, please delete it first, then re-schedule.")
            return
        }
    }

    id, err := dao.AddAdminJob(&common_models.AdminJob{
        Name: ajr.Name,
        Kind: ajr.JobKind(),
        Cron: ajr.CronString(),
    })
    if err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
    ajr.ID = id
    job := ajr.ToJob()

    // submit job to job service
    log.Debugf("submitting admin job to job service")
    uuid, err := utils_core.GetJobServiceClient().SubmitJob(job)
    if err != nil {
        if err := dao.DeleteAdminJob(id); err != nil {
            log.Debugf("Failed to delete admin job, err: %v", err)
        }
        if httpErr, ok := err.(*common_http.Error); ok && httpErr.Code == http.StatusConflict {
            aj.HandleConflict(fmt.Sprintf("Conflict when triggering %s, please try again later.", ajr.Name))
            return
        }
        aj.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
    if err := dao.SetAdminJobUUID(id, uuid); err != nil {
        aj.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
}

func convertToAdminJobRep(job *common_models.AdminJob) (models.AdminJobRep, error) {
    if job == nil {
        return models.AdminJobRep{}, nil
    }

    AdminJobRep := models.AdminJobRep{
        ID:           job.ID,
        Name:         job.Name,
        Kind:         job.Kind,
        Status:       job.Status,
        CreationTime: job.CreationTime,
        UpdateTime:   job.UpdateTime,
    }

    if len(job.Cron) > 0 {
        schedule := &models.ScheduleParam{}
        if err := json.Unmarshal([]byte(job.Cron), &schedule); err != nil {
            return models.AdminJobRep{}, err
        }
        AdminJobRep.Schedule = schedule
    }
    return AdminJobRep, nil
}
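The AJAPI helpers above back the concrete GC and scan-all endpoints registered later in this commit. As a usage illustration only (editor's sketch, not part of the commit; host and credentials are hypothetical), creating a daily GC schedule through the REST API would look like:

# sketch: create a daily GC schedule via Harbor's admin-job API
import base64
import json
import urllib.request

payload = {"schedule": {"type": "Daily", "cron": "0 0 0 * * *"}}
req = urllib.request.Request(
    "https://harbor.example.com/api/system/gc/schedule",  # hypothetical host
    data=json.dumps(payload).encode(),
    headers={
        "Content-Type": "application/json",
        # basic auth with a hypothetical admin account
        "Authorization": "Basic " + base64.b64encode(b"admin:Harbor12345").decode(),
    },
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)  # the handler responds 201 Created on success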

@ -29,11 +29,11 @@ import (

    "github.com/astaxie/beego"
    "github.com/dghubble/sling"

    "github.com/goharbor/harbor/src/common/dao"
    "github.com/goharbor/harbor/src/common/job/test"
    "github.com/goharbor/harbor/src/common/models"
    testutils "github.com/goharbor/harbor/src/common/utils/test"
    api_models "github.com/goharbor/harbor/src/core/api/models"
    apimodels "github.com/goharbor/harbor/src/core/api/models"
    _ "github.com/goharbor/harbor/src/core/auth/db"
    _ "github.com/goharbor/harbor/src/core/auth/ldap"

@ -148,6 +148,7 @@ func init() {
    beego.Router("/api/system/gc/:id", &GCAPI{}, "get:GetGC")
    beego.Router("/api/system/gc/:id([0-9]+)/log", &GCAPI{}, "get:GetLog")
    beego.Router("/api/system/gc/schedule", &GCAPI{}, "get:Get;put:Put;post:Post")
    beego.Router("/api/system/scanAll/schedule", &ScanAllAPI{}, "get:Get;put:Put;post:Post")

    beego.Router("/api/projects/:pid([0-9]+)/robots/", &RobotAPI{}, "post:Post;get:List")
    beego.Router("/api/projects/:pid([0-9]+)/robots/:id([0-9]+)", &RobotAPI{}, "get:Get;put:Put;delete:Delete")

@ -1091,7 +1092,7 @@ func (a testapi) DeleteMeta(authInfor usrInfo, projectID int64, name string) (in
    return code, string(body), err
}

func (a testapi) AddGC(authInfor usrInfo, adminReq apilib.GCReq) (int, error) {
func (a testapi) AddGC(authInfor usrInfo, adminReq apilib.AdminJobReq) (int, error) {
    _sling := sling.New().Post(a.basePath)

    path := "/api/system/gc/schedule"

@ -1108,12 +1109,42 @@ func (a testapi) AddGC(authInfor usrInfo, adminReq apilib.GCReq) (int, error) {
    return httpStatusCode, err
}

func (a testapi) GCScheduleGet(authInfo usrInfo) (int, []apilib.AdminJob, error) {
func (a testapi) GCScheduleGet(authInfo usrInfo) (int, api_models.AdminJobSchedule, error) {
    _sling := sling.New().Get(a.basePath)
    path := "/api/system/gc/schedule"
    _sling = _sling.Path(path)
    httpStatusCode, body, err := request(_sling, jsonAcceptHeader, authInfo)
    var successPayLoad []apilib.AdminJob
    var successPayLoad api_models.AdminJobSchedule
    if 200 == httpStatusCode && nil == err {
        err = json.Unmarshal(body, &successPayLoad)
    }

    return httpStatusCode, successPayLoad, err
}

func (a testapi) AddScanAll(authInfor usrInfo, adminReq apilib.AdminJobReq) (int, error) {
    _sling := sling.New().Post(a.basePath)

    path := "/api/system/scanAll/schedule"

    _sling = _sling.Path(path)

    // body params
    _sling = _sling.BodyJSON(adminReq)
    var httpStatusCode int
    var err error

    httpStatusCode, _, err = request(_sling, jsonAcceptHeader, authInfor)

    return httpStatusCode, err
}

func (a testapi) ScanAllScheduleGet(authInfo usrInfo) (int, api_models.AdminJobSchedule, error) {
    _sling := sling.New().Get(a.basePath)
    path := "/api/system/scanAll/schedule"
    _sling = _sling.Path(path)
    httpStatusCode, body, err := request(_sling, jsonAcceptHeader, authInfo)
    var successPayLoad api_models.AdminJobSchedule
    if 200 == httpStatusCode && nil == err {
        err = json.Unmarshal(body, &successPayLoad)
    }

@ -42,14 +42,20 @@ const (
    ScheduleNone = "None"
)

// GCReq holds request information for admin job
type GCReq struct {
    Schedule *ScheduleParam `json:"schedule"`
// AdminJobReq holds request information for an admin job
type AdminJobReq struct {
    AdminJobSchedule
    Name       string                 `json:"name"`
    Status     string                 `json:"status"`
    ID         int64                  `json:"id"`
    Parameters map[string]interface{} `json:"parameters"`
}

// AdminJobSchedule ...
type AdminJobSchedule struct {
    Schedule *ScheduleParam `json:"schedule"`
}

// ScheduleParam defines the parameter of schedule trigger
type ScheduleParam struct {
    // Daily, Weekly, Custom, Manual, None

@ -58,62 +64,63 @@ type ScheduleParam struct {
    Cron string `json:"cron"`
}

// GCRep holds the response of query gc
type GCRep struct {
    ID           int64          `json:"id"`
    Name         string         `json:"job_name"`
    Kind         string         `json:"job_kind"`
    Schedule     *ScheduleParam `json:"schedule"`
    Status       string         `json:"job_status"`
    UUID         string         `json:"-"`
    Deleted      bool           `json:"deleted"`
    CreationTime time.Time      `json:"creation_time"`
    UpdateTime   time.Time      `json:"update_time"`
// AdminJobRep holds the response of an admin job query
type AdminJobRep struct {
    AdminJobSchedule
    ID           int64     `json:"id"`
    Name         string    `json:"job_name"`
    Kind         string    `json:"job_kind"`
    Status       string    `json:"job_status"`
    UUID         string    `json:"-"`
    Deleted      bool      `json:"deleted"`
    CreationTime time.Time `json:"creation_time"`
    UpdateTime   time.Time `json:"update_time"`
}

// Valid validates the gc request
func (gr *GCReq) Valid(v *validation.Validation) {
    if gr.Schedule == nil {
// Valid validates the schedule type of an admin job request.
// Only ScheduleHourly, ScheduleDaily, ScheduleWeekly, ScheduleCustom, ScheduleManual and ScheduleNone are accepted.
func (ar *AdminJobReq) Valid(v *validation.Validation) {
    if ar.Schedule == nil {
        return
    }
    switch gr.Schedule.Type {
    switch ar.Schedule.Type {
    case ScheduleHourly, ScheduleDaily, ScheduleWeekly, ScheduleCustom:
        if _, err := cron.Parse(gr.Schedule.Cron); err != nil {
            v.SetError("cron", fmt.Sprintf("Invalid schedule trigger parameter cron: %s", gr.Schedule.Cron))
        if _, err := cron.Parse(ar.Schedule.Cron); err != nil {
            v.SetError("cron", fmt.Sprintf("Invalid schedule trigger parameter cron: %s", ar.Schedule.Cron))
        }
    case ScheduleManual, ScheduleNone:
    default:
        v.SetError("kind", fmt.Sprintf("Invalid schedule kind: %s", gr.Schedule.Type))
        v.SetError("kind", fmt.Sprintf("Invalid schedule kind: %s", ar.Schedule.Type))
    }
}

// ToJob converts request to a job recognized by job service.
func (gr *GCReq) ToJob() *models.JobData {
func (ar *AdminJobReq) ToJob() *models.JobData {
    metadata := &models.JobMetadata{
        JobKind: gr.JobKind(),
        Cron:    gr.Schedule.Cron,
        JobKind: ar.JobKind(),
        Cron:    ar.Schedule.Cron,
        // GC job must be unique ...
        IsUnique: true,
    }

    jobData := &models.JobData{
        Name:       job.ImageGC,
        Parameters: gr.Parameters,
        Name:       ar.Name,
        Parameters: ar.Parameters,
        Metadata:   metadata,
        StatusHook: fmt.Sprintf("%s/service/notifications/jobs/adminjob/%d",
            config.InternalCoreURL(), gr.ID),
            config.InternalCoreURL(), ar.ID),
    }
    return jobData
}

// IsPeriodic ...
func (gr *GCReq) IsPeriodic() bool {
    return gr.JobKind() == job.JobKindPeriodic
func (ar *AdminJobReq) IsPeriodic() bool {
    return ar.JobKind() == job.JobKindPeriodic
}

// JobKind ...
func (gr *GCReq) JobKind() string {
    switch gr.Schedule.Type {
func (ar *AdminJobReq) JobKind() string {
    switch ar.Schedule.Type {
    case ScheduleHourly, ScheduleDaily, ScheduleWeekly, ScheduleCustom:
        return job.JobKindPeriodic
    case ScheduleManual:

@ -124,8 +131,8 @@ func (gr *GCReq) JobKind() string {
}

// CronString ...
func (gr *GCReq) CronString() string {
    str, err := json.Marshal(gr.Schedule)
func (ar *AdminJobReq) CronString() string {
    str, err := json.Marshal(ar.Schedule)
    if err != nil {
        log.Debugf("failed to marshal json error, %v", err)
        return ""

@ -40,13 +40,17 @@ func TestMain(m *testing.M) {
}

func TestToJob(t *testing.T) {
    schedule := &ScheduleParam{
        Type: "Daily",
        Cron: "20 3 0 * * *",

    adminJobSchedule := AdminJobSchedule{
        Schedule: &ScheduleParam{
            Type: "Daily",
            Cron: "20 3 0 * * *",
        },
    }

    adminjob := &GCReq{
        Schedule: schedule,
    adminjob := &AdminJobReq{
        Name:             common_job.ImageGC,
        AdminJobSchedule: adminJobSchedule,
    }

    job := adminjob.ToJob()

@ -56,12 +60,16 @@ func TestToJob(t *testing.T) {
}

func TestToJobManual(t *testing.T) {
    schedule := &ScheduleParam{
        Type: "Manual",

    adminJobSchedule := AdminJobSchedule{
        Schedule: &ScheduleParam{
            Type: "Manual",
        },
    }

    adminjob := &GCReq{
        Schedule: schedule,
    adminjob := &AdminJobReq{
        AdminJobSchedule: adminJobSchedule,
        Name:             common_job.ImageGC,
    }

    job := adminjob.ToJob()

@ -70,13 +78,16 @@ func TestToJobManual(t *testing.T) {
}

func TestIsPeriodic(t *testing.T) {
    schedule := &ScheduleParam{
        Type: "Daily",
        Cron: "20 3 0 * * *",

    adminJobSchedule := AdminJobSchedule{
        Schedule: &ScheduleParam{
            Type: "Daily",
            Cron: "20 3 0 * * *",
        },
    }

    adminjob := &GCReq{
        Schedule: schedule,
    adminjob := &AdminJobReq{
        AdminJobSchedule: adminJobSchedule,
    }

    isPeriodic := adminjob.IsPeriodic()

@ -84,33 +95,44 @@ func TestIsPeriodic(t *testing.T) {
}

func TestJobKind(t *testing.T) {
    schedule := &ScheduleParam{
        Type: "Daily",
        Cron: "20 3 0 * * *",

    adminJobSchedule := AdminJobSchedule{
        Schedule: &ScheduleParam{
            Type: "Daily",
            Cron: "20 3 0 * * *",
        },
    }
    adminjob := &GCReq{
        Schedule: schedule,

    adminjob := &AdminJobReq{
        AdminJobSchedule: adminJobSchedule,
    }

    kind := adminjob.JobKind()
    assert.Equal(t, kind, "Periodic")

    schedule1 := &ScheduleParam{
        Type: "Manual",
    adminJobSchedule1 := AdminJobSchedule{
        Schedule: &ScheduleParam{
            Type: "Manual",
        },
    }
    adminjob1 := &GCReq{
        Schedule: schedule1,
    adminjob1 := &AdminJobReq{
        AdminJobSchedule: adminJobSchedule1,
    }
    kind1 := adminjob1.JobKind()
    assert.Equal(t, kind1, "Generic")
}

func TestCronString(t *testing.T) {
    schedule := &ScheduleParam{
        Type: "Daily",
        Cron: "20 3 0 * * *",

    adminJobSchedule := AdminJobSchedule{
        Schedule: &ScheduleParam{
            Type: "Daily",
            Cron: "20 3 0 * * *",
        },
    }
    adminjob := &GCReq{
        Schedule: schedule,

    adminjob := &AdminJobReq{
        AdminJobSchedule: adminJobSchedule,
    }
    cronStr := adminjob.CronString()
    assert.True(t, strings.EqualFold(cronStr, "{\"type\":\"Daily\",\"Cron\":\"20 3 0 * * *\"}"))

@ -20,19 +20,13 @@ import (
    "os"
    "strconv"

    "encoding/json"
    "github.com/goharbor/harbor/src/common/dao"
    common_http "github.com/goharbor/harbor/src/common/http"
    common_job "github.com/goharbor/harbor/src/common/job"
    common_models "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/utils/log"
    "github.com/goharbor/harbor/src/core/api/models"
    utils_core "github.com/goharbor/harbor/src/core/utils"
)

// GCAPI handles request of harbor admin...
// GCAPI handles request of harbor GC...
type GCAPI struct {
    BaseController
    AJAPI
}

// Prepare validates the URL and params, it needs the system admin permission.

@ -48,55 +42,44 @@ func (gc *GCAPI) Prepare() {
    }
}

// Post ...
// Post creates a cron schedule or a manual trigger for GC according to the request.
// create a daily schedule for GC
// {
//  "schedule": {
//    "type": "Daily",
//    "cron": "0 0 0 * * *"
//  }
// }
// create a manual trigger for GC
// {
//  "schedule": {
//    "type": "Manual"
//  }
// }
func (gc *GCAPI) Post() {
    gr := models.GCReq{}
    gc.DecodeJSONReqAndValidate(&gr)
    gc.submitJob(&gr)
    gc.Redirect(http.StatusCreated, strconv.FormatInt(gr.ID, 10))
    ajr := models.AdminJobReq{}
    gc.DecodeJSONReqAndValidate(&ajr)
    ajr.Name = common_job.ImageGC
    ajr.Parameters = map[string]interface{}{
        "redis_url_reg": os.Getenv("_REDIS_URL_REG"),
    }
    gc.submit(&ajr)
    gc.Redirect(http.StatusCreated, strconv.FormatInt(ajr.ID, 10))
}

// Put ...
// Put handles GC cron schedule update/delete.
// Request: delete the schedule of GC
// {
//  "schedule": {
//    "type": "None",
//    "cron": ""
//  }
// }
func (gc *GCAPI) Put() {
    gr := models.GCReq{}
    gc.DecodeJSONReqAndValidate(&gr)

    if gr.Schedule.Type == models.ScheduleManual {
        gc.HandleInternalServerError(fmt.Sprintf("Failed to update GC schedule due to wrong schedule type: %s.", gr.Schedule.Type))
        return
    }

    query := &common_models.AdminJobQuery{
        Name: common_job.ImageGC,
        Kind: common_job.JobKindPeriodic,
    }
    jobs, err := dao.GetAdminJobs(query)
    if err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
    if len(jobs) != 1 {
        gc.HandleInternalServerError("Failed to update the GC schedule: only one schedule is accepted.")
        return
    }

    // stop the scheduled job and remove it.
    if err = utils_core.GetJobServiceClient().PostAction(jobs[0].UUID, common_job.JobActionStop); err != nil {
        if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound {
            gc.HandleInternalServerError(fmt.Sprintf("%v", err))
            return
        }
    }

    if err = dao.DeleteAdminJob(jobs[0].ID); err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }

    // Setting the schedule to None cancels the schedule; no new job is added.
    if gr.Schedule.Type != models.ScheduleNone {
        gc.submitJob(&gr)
    }
    ajr := models.AdminJobReq{}
    gc.DecodeJSONReqAndValidate(&ajr)
    ajr.Name = common_job.ImageGC
    gc.updateSchedule(ajr)
}

// GetGC ...

@ -106,74 +89,17 @@ func (gc *GCAPI) GetGC() {
        gc.HandleInternalServerError(fmt.Sprintf("need to specify gc id"))
        return
    }

    jobs, err := dao.GetAdminJobs(&common_models.AdminJobQuery{
        ID: id,
    })

    gcreps := []*models.GCRep{}
    for _, job := range jobs {
        gcrep, err := convertToGCRep(job)
        if err != nil {
            gc.HandleInternalServerError(fmt.Sprintf("failed to convert gc response: %v", err))
            return
        }
        gcreps = append(gcreps, &gcrep)
    }

    if err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("failed to get admin jobs: %v", err))
        return
    }
    gc.Data["json"] = gcreps
    gc.ServeJSON()
    gc.get(id)
}

// List ...
// List returns the top 10 executions of GC, including manual and cron.
func (gc *GCAPI) List() {
    jobs, err := dao.GetTop10AdminJobsOfName(common_job.ImageGC)
    if err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("failed to get admin jobs: %v", err))
        return
    }
    gcreps := []*models.GCRep{}
    for _, job := range jobs {
        gcrep, err := convertToGCRep(job)
        if err != nil {
            gc.HandleInternalServerError(fmt.Sprintf("failed to convert gc response: %v", err))
            return
        }
        gcreps = append(gcreps, &gcrep)
    }
    gc.Data["json"] = gcreps
    gc.ServeJSON()
    gc.list(common_job.ImageGC)
}

// Get gets GC schedule ...
func (gc *GCAPI) Get() {
    jobs, err := dao.GetAdminJobs(&common_models.AdminJobQuery{
        Name: common_job.ImageGC,
        Kind: common_job.JobKindPeriodic,
    })
    if err != nil {
        gc.HandleNotFound(fmt.Sprintf("failed to get admin jobs: %v", err))
        return
    }
    if len(jobs) > 1 {
        gc.HandleInternalServerError("Got more than one scheduled GC job; make sure there is only one.")
        return
    }
    gcreps := []*models.GCRep{}
    for _, job := range jobs {
        gcrep, err := convertToGCRep(job)
        if err != nil {
            gc.HandleInternalServerError(fmt.Sprintf("failed to convert gc response: %v", err))
            return
        }
        gcreps = append(gcreps, &gcrep)
    }
    gc.Data["json"] = gcreps
    gc.ServeJSON()
    gc.getSchedule(common_job.ImageGC)
}

// GetLog ...

@ -183,108 +109,5 @@ func (gc *GCAPI) GetLog() {
        gc.HandleBadRequest("invalid ID")
        return
    }
    job, err := dao.GetAdminJob(id)
    if err != nil {
        log.Errorf("Failed to load job data for job: %d, error: %v", id, err)
        gc.CustomAbort(http.StatusInternalServerError, "Failed to get Job data")
    }
    if job == nil {
        log.Errorf("Failed to get admin job: %d", id)
        gc.CustomAbort(http.StatusNotFound, "Failed to get Job")
    }

    logBytes, err := utils_core.GetJobServiceClient().GetJobLog(job.UUID)
    if err != nil {
        if httpErr, ok := err.(*common_http.Error); ok {
            gc.RenderError(httpErr.Code, "")
            log.Errorf(fmt.Sprintf("failed to get log of job %d: %d %s",
                id, httpErr.Code, httpErr.Message))
            return
        }
        gc.HandleInternalServerError(fmt.Sprintf("Failed to get job logs, uuid: %s, error: %v", job.UUID, err))
        return
    }
    gc.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(len(logBytes)))
    gc.Ctx.ResponseWriter.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain")
    _, err = gc.Ctx.ResponseWriter.Write(logBytes)
    if err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("Failed to write job logs, uuid: %s, error: %v", job.UUID, err))
    }
}

// submitJob submits a job to job service per request
func (gc *GCAPI) submitJob(gr *models.GCReq) {
    // cannot post multiple schedules for the GC job.
    if gr.IsPeriodic() {
        jobs, err := dao.GetAdminJobs(&common_models.AdminJobQuery{
            Name: common_job.ImageGC,
            Kind: common_job.JobKindPeriodic,
        })
        if err != nil {
            gc.HandleInternalServerError(fmt.Sprintf("failed to get admin jobs: %v", err))
            return
        }
        if len(jobs) != 0 {
            gc.HandleStatusPreconditionFailed("Failed to set the schedule for GC: one already exists, please delete it first, then re-schedule.")
            return
        }
    }

    id, err := dao.AddAdminJob(&common_models.AdminJob{
        Name: common_job.ImageGC,
        Kind: gr.JobKind(),
        Cron: gr.CronString(),
    })
    if err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
    gr.ID = id
    gr.Parameters = map[string]interface{}{
        "redis_url_reg": os.Getenv("_REDIS_URL_REG"),
    }
    job := gr.ToJob()
    if err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }

    // submit job to jobservice
    log.Debugf("submitting GC admin job to jobservice")
    uuid, err := utils_core.GetJobServiceClient().SubmitJob(job)
    if err != nil {
        if err := dao.DeleteAdminJob(id); err != nil {
            log.Debugf("Failed to delete admin job, err: %v", err)
        }
        gc.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
    if err := dao.SetAdminJobUUID(id, uuid); err != nil {
        gc.HandleInternalServerError(fmt.Sprintf("%v", err))
        return
    }
}

func convertToGCRep(job *common_models.AdminJob) (models.GCRep, error) {
    if job == nil {
        return models.GCRep{}, nil
    }

    gcrep := models.GCRep{
        ID:           job.ID,
        Name:         job.Name,
        Kind:         job.Kind,
        Status:       job.Status,
        Deleted:      job.Deleted,
        CreationTime: job.CreationTime,
        UpdateTime:   job.UpdateTime,
    }
    if len(job.Cron) > 0 {
        schedule := &models.ScheduleParam{}
        if err := json.Unmarshal([]byte(job.Cron), &schedule); err != nil {
            return models.GCRep{}, err
        }
        gcrep.Schedule = schedule
    }
    return gcrep, nil
    gc.getLog(id)
}

@ -3,16 +3,13 @@ package api
import (
    "testing"

    common_models "github.com/goharbor/harbor/src/common/models"
    api_modes "github.com/goharbor/harbor/src/core/api/models"
    "github.com/goharbor/harbor/tests/apitests/apilib"
    "github.com/stretchr/testify/assert"
)

var adminJob001 apilib.GCReq
var adminJob001schdeule apilib.ScheduleParam
var adminJob001 apilib.AdminJobReq

func TestAdminJobPost(t *testing.T) {
func TestGCPost(t *testing.T) {

    assert := assert.New(t)
    apiTest := newHarborAPI()

@ -27,7 +24,7 @@ func TestAdminJobPost(t *testing.T) {
    }
}

func TestAdminJobGet(t *testing.T) {
func TestGCGet(t *testing.T) {
    assert := assert.New(t)
    apiTest := newHarborAPI()

@ -39,41 +36,3 @@ func TestAdminJobGet(t *testing.T) {
        assert.Equal(200, code, "Get adminjob status should be 200")
    }
}

func TestConvertToGCRep(t *testing.T) {
    cases := []struct {
        input    *common_models.AdminJob
        expected api_modes.GCRep
    }{
        {
            input:    nil,
            expected: api_modes.GCRep{},
        },
        {
            input: &common_models.AdminJob{
                ID:      1,
                Name:    "IMAGE_GC",
                Kind:    "Generic",
                Cron:    "{\"Type\":\"Daily\",\"Cron\":\"20 3 0 * * *\"}",
                Status:  "pending",
                Deleted: false,
            },
            expected: api_modes.GCRep{
                ID:   1,
                Name: "IMAGE_GC",
                Kind: "Generic",
                Schedule: &api_modes.ScheduleParam{
                    Type: "Daily",
                    Cron: "20 3 0 * * *",
                },
                Status:  "pending",
                Deleted: false,
            },
        },
    }

    for _, c := range cases {
        actual, _ := convertToGCRep(c.input)
        assert.EqualValues(t, c.expected, actual)
    }
}

@ -1022,33 +1022,6 @@ func (ra *RepositoryAPI) VulnerabilityDetails() {
    ra.ServeJSON()
}

// ScanAll handles the api to scan all images on Harbor.
func (ra *RepositoryAPI) ScanAll() {
    if !config.WithClair() {
        log.Warningf("Harbor is not deployed with Clair, it's not possible to scan images.")
        ra.RenderError(http.StatusServiceUnavailable, "")
        return
    }
    if !ra.SecurityCtx.IsAuthenticated() {
        ra.HandleUnauthorized()
        return
    }
    if !ra.SecurityCtx.IsSysAdmin() {
        ra.HandleForbidden(ra.SecurityCtx.GetUsername())
        return
    }
    if err := coreutils.ScanAllImages(); err != nil {
        log.Errorf("Failed triggering scan all images, error: %v", err)
        if httpErr, ok := err.(*commonhttp.Error); ok && httpErr.Code == http.StatusConflict {
            ra.HandleConflict("Conflict when triggering scan all images, please try again later.")
            return
        }
        ra.HandleInternalServerError(fmt.Sprintf("Error: %v", err))
        return
    }
    ra.Ctx.ResponseWriter.WriteHeader(http.StatusAccepted)
}

func getSignatures(username, repository string) (map[string][]notary.Target, error) {
    targets, err := notary.GetInternalTargets(config.InternalNotaryEndpoint(),
        username, repository)
81  src/core/api/scan_all.go  Normal file
@ -0,0 +1,81 @@
package api

import (
    "net/http"
    "strconv"

    common_job "github.com/goharbor/harbor/src/common/job"
    "github.com/goharbor/harbor/src/common/utils/log"
    "github.com/goharbor/harbor/src/core/api/models"
    "github.com/goharbor/harbor/src/core/config"
)

// ScanAllAPI handles requests to scan all images.
type ScanAllAPI struct {
    AJAPI
}

// Prepare validates the URL and params; it requires system admin permission.
func (sc *ScanAllAPI) Prepare() {
    sc.BaseController.Prepare()
    if !config.WithClair() {
        log.Warningf("Harbor is not deployed with Clair, it's not possible to scan images.")
        sc.RenderError(http.StatusServiceUnavailable, "")
        return
    }
    if !sc.SecurityCtx.IsAuthenticated() {
        sc.HandleUnauthorized()
        return
    }
    if !sc.SecurityCtx.IsSysAdmin() {
        sc.HandleForbidden(sc.SecurityCtx.GetUsername())
        return
    }
}

// Post creates a cron schedule or a manual trigger for scan all, depending on the request.
// Create a daily schedule for scan all:
// {
//     "schedule": {
//         "type": "Daily",
//         "cron": "0 0 0 * * *"
//     }
// }
// Create a manual trigger for scan all:
// {
//     "schedule": {
//         "type": "Manual"
//     }
// }
func (sc *ScanAllAPI) Post() {
    ajr := models.AdminJobReq{}
    sc.DecodeJSONReqAndValidate(&ajr)
    ajr.Name = common_job.ImageScanAllJob
    sc.submit(&ajr)
    sc.Redirect(http.StatusCreated, strconv.FormatInt(ajr.ID, 10))
}

// Put handles scan all cron schedule update/delete.
// Request to delete the schedule of scan all:
// {
//     "schedule": {
//         "type": "None",
//         "cron": ""
//     }
// }
func (sc *ScanAllAPI) Put() {
    ajr := models.AdminJobReq{}
    sc.DecodeJSONReqAndValidate(&ajr)
    ajr.Name = common_job.ImageScanAllJob
    sc.updateSchedule(ajr)
}

// Get gets the scan all schedule.
func (sc *ScanAllAPI) Get() {
    sc.getSchedule(common_job.ImageScanAllJob)
}

// List returns the top 10 executions of scan all, including both manual and cron.
func (sc *ScanAllAPI) List() {
    sc.list(common_job.ImageScanAllJob)
}
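
Taken together with the route registered later in this diff (/api/system/scanAll/schedule), a system admin can create the daily schedule with a plain HTTP call. A client sketch; the host and the basic-auth credentials are placeholders, not part of this change:

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    func main() {
        // Payload mirrors the doc comment on ScanAllAPI.Post above.
        body := []byte(`{"schedule":{"type":"Daily","cron":"0 0 0 * * *"}}`)

        // Host and credentials are placeholders for illustration.
        req, err := http.NewRequest(http.MethodPost,
            "https://harbor.example.com/api/system/scanAll/schedule",
            bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/json")
        req.SetBasicAuth("admin", "Harbor12345")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status) // expect 201 Created on success, per sc.Redirect above
    }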
38  src/core/api/scan_all_test.go  Normal file
@ -0,0 +1,38 @@
package api

import (
    "testing"

    "github.com/goharbor/harbor/tests/apitests/apilib"
    "github.com/stretchr/testify/assert"
)

var adminJob002 apilib.AdminJobReq

func TestScanAllPost(t *testing.T) {

    assert := assert.New(t)
    apiTest := newHarborAPI()

    // case 1: add a new scan all job
    code, err := apiTest.AddScanAll(*admin, adminJob002)
    if err != nil {
        t.Error("Error occurred while adding a scan all job", err.Error())
        t.Log(err)
    } else {
        assert.Equal(200, code, "Add scan all status should be 200")
    }
}

func TestScanAllGet(t *testing.T) {
    assert := assert.New(t)
    apiTest := newHarborAPI()

    code, _, err := apiTest.ScanAllScheduleGet(*admin)
    if err != nil {
        t.Error("Error occurred while getting a scan all job", err.Error())
        t.Log(err)
    } else {
        assert.Equal(200, code, "Get scan all status should be 200")
    }
}
@ -29,7 +29,6 @@ import (
    "github.com/goharbor/harbor/src/common/utils/registry"
    "github.com/goharbor/harbor/src/common/utils/registry/auth"
    "github.com/goharbor/harbor/src/core/config"
    "github.com/goharbor/harbor/src/core/notifier"
    "github.com/goharbor/harbor/src/core/promgr"
    "github.com/goharbor/harbor/src/core/service/token"
    coreutils "github.com/goharbor/harbor/src/core/utils"
@ -312,9 +311,3 @@ func transformVulnerabilities(layerWithVuln *models.ClairLayerEnvelope) []*model
    }
    return res
}

// Watch the configuration changes.
// Wrap the same method in common utils.
func watchConfigChanges(cfg map[string]interface{}) error {
    return notifier.WatchConfigChanges(cfg)
}
@ -36,7 +36,6 @@ import (
    _ "github.com/goharbor/harbor/src/core/auth/uaa"
    "github.com/goharbor/harbor/src/core/config"
    "github.com/goharbor/harbor/src/core/filter"
    "github.com/goharbor/harbor/src/core/notifier"
    "github.com/goharbor/harbor/src/core/proxy"
    "github.com/goharbor/harbor/src/core/service/token"
    "github.com/goharbor/harbor/src/replication/core"
@ -122,11 +121,6 @@ func main() {
        log.Fatalf("Failed to initialize API handlers with error: %s", err.Error())
    }

    // Subscribe the policy change topic.
    if err = notifier.Subscribe(notifier.ScanAllPolicyTopic, &notifier.ScanPolicyNotificationHandler{}); err != nil {
        log.Errorf("failed to subscribe scan all policy change topic: %v", err)
    }

    if config.WithClair() {
        clairDB, err := config.ClairDB()
        if err != nil {
@ -1,40 +0,0 @@
package notifier

import (
    "errors"

    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/utils"
)

// WatchConfigChanges is used to watch the configuration changes.
func WatchConfigChanges(cfg map[string]interface{}) error {
    if cfg == nil {
        return errors.New("Empty configurations")
    }

    // Currently only watch the scan all policy change.
    if v, ok := cfg[ScanAllPolicyTopic]; ok {
        policyCfg := &models.ScanAllPolicy{}
        if err := utils.ConvertMapToStruct(policyCfg, v); err != nil {
            return err
        }

        policyNotification := ScanPolicyNotification{
            Type:      policyCfg.Type,
            DailyTime: 0,
        }

        if t, yes := policyCfg.Parm["daily_time"]; yes {
            if dt, success := t.(float64); success {
                policyNotification.DailyTime = (int64)(dt)
            } else {
                return errors.New("Invalid daily_time type")
            }
        }

        return Publish(ScanAllPolicyTopic, policyNotification)
    }

    return nil
}
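
The float64 type assertion in the removed watcher is there because encoding/json decodes any untyped JSON number into float64 when the target is map[string]interface{}. A standalone sketch of the same decode-then-convert step (keys mirror the jsonText fixture in the test file below; nothing else here is Harbor-specific):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        raw := `{"scan_all_policy":{"type":"daily","parameter":{"daily_time":3600}}}`
        v := make(map[string]interface{})
        if err := json.Unmarshal([]byte(raw), &v); err != nil {
            panic(err)
        }

        // Nested objects decode to map[string]interface{} as well.
        policy := v["scan_all_policy"].(map[string]interface{})
        param := policy["parameter"].(map[string]interface{})

        // JSON numbers arrive as float64; convert explicitly before use.
        dt, ok := param["daily_time"].(float64)
        if !ok {
            panic("invalid daily_time type")
        }
        fmt.Println(int64(dt)) // 3600
    }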
@ -1,57 +0,0 @@
package notifier

import (
    "encoding/json"
    "strconv"
    "strings"
    "testing"
    "time"
)

var jsonText = `
{
    "scan_all_policy": {
        "type": "daily",
        "parameter": {
            "daily_time": <PLACE_HOLDER>
        }
    }
}
`

func TestWatchConfiguration(t *testing.T) {
    now := time.Now().UTC()
    offset := (now.Hour()+1)*3600 + now.Minute()*60
    jsonT := strings.Replace(jsonText, "<PLACE_HOLDER>", strconv.Itoa(offset), -1)
    v := make(map[string]interface{})
    if err := json.Unmarshal([]byte(jsonT), &v); err != nil {
        t.Fatal(err)
    }

    if err := WatchConfigChanges(v); err != nil {
        if !strings.Contains(err.Error(), "No handlers registered") {
            t.Fatal(err)
        }
    }
}

var jsonText2 = `
{
    "scan_all_policy": {
        "type": "none"
    }
}
`

func TestWatchConfiguration2(t *testing.T) {
    v := make(map[string]interface{})
    if err := json.Unmarshal([]byte(jsonText2), &v); err != nil {
        t.Fatal(err)
    }

    if err := WatchConfigChanges(v); err != nil {
        if !strings.Contains(err.Error(), "No handlers registered") {
            t.Fatal(err)
        }
    }
}
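
For clarity, the offset the removed test plugs into daily_time is the number of seconds past UTC midnight, computed one hour ahead of the current time so the scheduled run stays in the future. The same arithmetic in isolation:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // daily_time is seconds past UTC 00:00; schedule one hour from now,
        // rounded down to the minute, as the test above does.
        now := time.Now().UTC()
        offset := (now.Hour()+1)*3600 + now.Minute()*60
        fmt.Println(offset) // e.g. 41100 (11:25:00 UTC) when now is 10:25 UTC
    }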
@ -1,104 +0,0 @@
package notifier

import (
    "errors"
    "fmt"
    "net/http"

    "github.com/goharbor/harbor/src/common/dao"
    common_http "github.com/goharbor/harbor/src/common/http"
    "github.com/goharbor/harbor/src/common/job"
    "github.com/goharbor/harbor/src/common/models"
    common_utils "github.com/goharbor/harbor/src/common/utils"
    "github.com/goharbor/harbor/src/common/utils/log"
    "github.com/goharbor/harbor/src/core/utils"
)

const (
    // PolicyTypeDaily specifies the policy type "daily"
    PolicyTypeDaily = "daily"
    // PolicyTypeNone specifies the policy type "none"
    PolicyTypeNone = "none"
)

// ScanPolicyNotification is defined to pass the policy change data.
type ScanPolicyNotification struct {
    // Type is used to keep the scan policy type: "none", "daily" and "refresh".
    Type string

    // DailyTime is used when the type is 'daily': the offset from UTC time 00:00.
    DailyTime int64
}

// ScanPolicyNotificationHandler is defined to handle the changes of scanning
// policy.
type ScanPolicyNotificationHandler struct{}

// IsStateful indicates this handler is stateful.
func (s *ScanPolicyNotificationHandler) IsStateful() bool {
    // Policy changes should be handled one by one.
    return true
}

// Handle the policy change notification.
func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error {
    notification, ok := value.(ScanPolicyNotification)
    if !ok {
        return errors.New("ScanPolicyNotificationHandler can not handle value with invalid type")
    }

    if notification.Type == PolicyTypeDaily {
        if err := cancelScanAllJobs(); err != nil {
            return fmt.Errorf("Failed to cancel scan_all jobs, error: %v", err)
        }
        h, m, s := common_utils.ParseOfftime(notification.DailyTime)
        cron := fmt.Sprintf("%d %d %d * * *", s, m, h)
        if err := utils.ScheduleScanAllImages(cron); err != nil {
            return fmt.Errorf("Failed to schedule scan_all job, error: %v", err)
        }
    } else if notification.Type == PolicyTypeNone {
        if err := cancelScanAllJobs(); err != nil {
            return fmt.Errorf("Failed to cancel scan_all jobs, error: %v", err)
        }
    } else {
        return fmt.Errorf("Notification type %s is not supported", notification.Type)
    }

    return nil
}

func cancelScanAllJobs(c ...job.Client) error {
    var client job.Client
    if len(c) == 0 {
        client = utils.GetJobServiceClient()
    } else {
        client = c[0]
    }
    q := &models.AdminJobQuery{
        Name: job.ImageScanAllJob,
        Kind: job.JobKindPeriodic,
    }
    jobs, err := dao.GetAdminJobs(q)
    if err != nil {
        log.Errorf("Failed to query scheduled scan_all jobs, error: %v", err)
        return err
    }
    if len(jobs) > 1 {
        log.Warningf("Got more than one scheduled scan_all job: %+v", jobs)
    }
    for _, j := range jobs {
        if err := dao.DeleteAdminJob(j.ID); err != nil {
            log.Warningf("Failed to delete scan_all job from DB, job ID: %d, job UUID: %s, error: %v", j.ID, j.UUID, err)
        }
        if err := client.PostAction(j.UUID, job.JobActionStop); err != nil {
            if e, ok := err.(*common_http.Error); ok && e.Code == http.StatusNotFound {
                log.Warningf("scan_all job not found on jobservice, UUID: %s, skip", j.UUID)
            } else {
                log.Errorf("Failed to stop scan_all job, UUID: %s, error: %v", j.UUID, err)
                return err
            }
        }
        log.Infof("scan_all job canceled, uuid: %s, id: %d", j.UUID, j.ID)
    }
    return nil
}
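
The removed handler turns that daily offset back into clock time via common_utils.ParseOfftime and renders it into the six-field cron spec the jobservice consumes (second, minute, hour, then wildcards, matching the format string above). A self-contained sketch; parseOfftime below is an illustrative stand-in for the real helper, not its actual implementation:

    package main

    import "fmt"

    // parseOfftime mimics what common_utils.ParseOfftime is used for here:
    // split an offset in seconds past UTC midnight into hour, minute, second.
    func parseOfftime(offset int64) (h, m, s int64) {
        offset = offset % (3600 * 24) // guard against values beyond one day
        h = offset / 3600
        offset = offset % 3600
        m = offset / 60
        s = offset % 60
        return
    }

    func main() {
        h, m, s := parseOfftime(41100) // 11:25:00 UTC
        cron := fmt.Sprintf("%d %d %d * * *", s, m, h)
        fmt.Println(cron) // "0 25 11 * * *"
    }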
@ -1,16 +0,0 @@
package notifier

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestScanPolicyNotificationHandler(t *testing.T) {
    assert := assert.New(t)
    s := &ScanPolicyNotificationHandler{}
    assert.True(s.IsStateful())
    err := s.Handle("")
    if assert.NotNil(err) {
        assert.Contains(err.Error(), "invalid type")
    }
}
@ -72,7 +72,6 @@ func initRouters() {
    beego.Router("/api/projects/:pid([0-9]+)/robots/:id([0-9]+)", &api.RobotAPI{}, "get:Get;put:Put;delete:Delete")

    beego.Router("/api/repositories", &api.RepositoryAPI{}, "get:Get")
    beego.Router("/api/repositories/scanAll", &api.RepositoryAPI{}, "post:ScanAll")
    beego.Router("/api/repositories/*", &api.RepositoryAPI{}, "delete:Delete;put:Put")
    beego.Router("/api/repositories/*/labels", &api.RepositoryLabelAPI{}, "get:GetOfRepository;post:AddToRepository")
    beego.Router("/api/repositories/*/labels/:id([0-9]+)", &api.RepositoryLabelAPI{}, "delete:RemoveFromRepository")
@ -94,6 +93,7 @@ func initRouters() {
    beego.Router("/api/system/gc/:id", &api.GCAPI{}, "get:GetGC")
    beego.Router("/api/system/gc/:id([0-9]+)/log", &api.GCAPI{}, "get:GetLog")
    beego.Router("/api/system/gc/schedule", &api.GCAPI{}, "get:Get;put:Put;post:Post")
    beego.Router("/api/system/scanAll/schedule", &api.ScanAllAPI{}, "get:Get;put:Put;post:Post")

    beego.Router("/api/policies/replication/:id([0-9]+)", &api.RepPolicyAPI{})
    beego.Router("/api/policies/replication", &api.RepPolicyAPI{}, "get:List")
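
With these routes in place, deleting an existing scan-all schedule is a PUT with type "None", per the doc comment on ScanAllAPI.Put earlier in this diff. A client sketch (host and credentials are placeholders):

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    func main() {
        // Payload mirrors the doc comment on ScanAllAPI.Put: type "None"
        // removes the existing cron schedule.
        body := []byte(`{"schedule":{"type":"None","cron":""}}`)

        req, err := http.NewRequest(http.MethodPut,
            "https://harbor.example.com/api/system/scanAll/schedule",
            bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/json")
        req.SetBasicAuth("admin", "Harbor12345")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }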
@ -33,51 +33,6 @@ var (
    jobServiceClient job.Client
)

// ScanAllImages scans all images on Harbor by submitting a scan-all job to jobservice; the job handler
// calls back into the API on the "core" service.
func ScanAllImages() error {
    _, err := scanAll("")
    return err
}

// ScheduleScanAllImages schedules a scan-all job based on the cron string and appends a record to the admin job table.
func ScheduleScanAllImages(cron string) error {
    _, err := scanAll(cron)
    return err
}

func scanAll(cron string, c ...job.Client) (string, error) {
    var client job.Client
    if len(c) == 0 {
        client = GetJobServiceClient()
    } else {
        client = c[0]
    }
    kind := job.JobKindGeneric
    if len(cron) > 0 {
        kind = job.JobKindPeriodic
    }
    meta := &jobmodels.JobMetadata{
        JobKind:  kind,
        IsUnique: true,
        Cron:     cron,
    }
    id, err := dao.AddAdminJob(&models.AdminJob{
        Name: job.ImageScanAllJob,
        Kind: kind,
    })
    if err != nil {
        return "", err
    }
    data := &jobmodels.JobData{
        Name:       job.ImageScanAllJob,
        Metadata:   meta,
        StatusHook: fmt.Sprintf("%s/service/notifications/jobs/adminjob/%d", config.InternalCoreURL(), id),
    }
    log.Infof("scan_all job scheduled/triggered, cron string: '%s'", cron)
    return client.SubmitJob(data)
}

// GetJobServiceClient returns the job service client instance.
func GetJobServiceClient() job.Client {
    cl.Lock()
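
Before this change, callers picked between a one-shot run and a periodic run purely by the cron argument: an empty string submits a generic job, a non-empty string submits a periodic job and records it in the admin job table. A usage sketch against the old helpers being removed here (only compiles inside the Harbor source tree):

    package main

    import (
        "github.com/goharbor/harbor/src/common/utils/log"
        coreutils "github.com/goharbor/harbor/src/core/utils"
    )

    func main() {
        // Empty cron string: scanAll submits a generic (one-shot) job, i.e. scan now.
        if err := coreutils.ScanAllImages(); err != nil {
            log.Errorf("manual scan_all failed: %v", err)
        }

        // Non-empty cron string: scanAll submits a periodic job that fires at
        // midnight UTC and records it in the admin job table.
        if err := coreutils.ScheduleScanAllImages("0 0 0 * * *"); err != nil {
            log.Errorf("scheduled scan_all failed: %v", err)
        }
    }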
@ -3,6 +3,7 @@
# These cert files are only for Harbor testing.
IP='10.4.142.42'
OPENSSLCNF=
DATA_VOL='/data'

for path in /etc/openssl/openssl.cnf /etc/ssl/openssl.cnf /usr/local/etc/openssl/openssl.cnf; do
    if [[ -e ${path} ]]; then
@ -30,6 +31,6 @@ openssl x509 -req -days 365 -in $IP.csr -CA harbor_ca.crt \
    -CAkey harbor_ca.key -CAcreateserial -extfile extfile.cnf -out $IP.crt

# Copy to harbor default location
mkdir -p /data/cert
cp $IP.crt /data/cert/server.crt
cp $IP.key /data/cert/server.key
mkdir -p $DATA_VOL/cert
cp $IP.crt $DATA_VOL/cert/server.crt
cp $IP.key $DATA_VOL/cert/server.key
@ -13,7 +13,7 @@ import { HarborLibraryModule } from './harbor-library.module';
        BrowserModule,
        FormsModule,
        HttpModule,
        ClarityModule.forRoot(),
        ClarityModule,
        HarborLibraryModule.forRoot()
    ],
    providers: [],
@ -27,7 +27,6 @@ import { OPERATION_DIRECTIVES } from './operation/index';
import { LABEL_DIRECTIVES } from "./label/index";
import { CREATE_EDIT_LABEL_DIRECTIVES } from "./create-edit-label/index";
import { LABEL_PIECE_DIRECTIVES } from "./label-piece/index";
import { HELMCHART_DIRECTIVE } from "./helm-chart/index";
import { IMAGE_NAME_INPUT_DIRECTIVES } from "./image-name-input/index";
import { CRON_SCHEDULE_DIRECTIVES } from "./cron-schedule/index";
import {
@ -53,8 +52,6 @@ import {
    ProjectDefaultService,
    LabelService,
    LabelDefaultService,
    HelmChartService,
    HelmChartDefaultService,
    RetagService,
    RetagDefaultService,
    UserPermissionService,
@ -209,7 +206,6 @@ export function initConfig(translateInitializer: TranslateServiceInitializer, co
        HBR_GRIDVIEW_DIRECTIVES,
        REPOSITORY_GRIDVIEW_DIRECTIVES,
        OPERATION_DIRECTIVES,
        HELMCHART_DIRECTIVE,
        IMAGE_NAME_INPUT_DIRECTIVES,
        CRON_SCHEDULE_DIRECTIVES
    ],
@ -237,9 +233,9 @@ export function initConfig(translateInitializer: TranslateServiceInitializer, co
        HBR_GRIDVIEW_DIRECTIVES,
        REPOSITORY_GRIDVIEW_DIRECTIVES,
        OPERATION_DIRECTIVES,
        HELMCHART_DIRECTIVE,
        IMAGE_NAME_INPUT_DIRECTIVES,
        CRON_SCHEDULE_DIRECTIVES
        CRON_SCHEDULE_DIRECTIVES,
        SharedModule
    ],
    providers: []
})
@ -263,7 +259,6 @@ export class HarborLibraryModule {
        config.jobLogService || { provide: JobLogService, useClass: JobLogDefaultService },
        config.projectPolicyService || { provide: ProjectService, useClass: ProjectDefaultService },
        config.labelService || { provide: LabelService, useClass: LabelDefaultService },
        config.helmChartService || { provide: HelmChartService, useClass: HelmChartDefaultService },
        config.userPermissionService || { provide: UserPermissionService, useClass: UserPermissionDefaultService },
        config.gcApiRepository || {provide: GcApiRepository, useClass: GcApiDefaultRepository},
        // Do initializing
@ -300,7 +295,6 @@ export class HarborLibraryModule {
        config.jobLogService || { provide: JobLogService, useClass: JobLogDefaultService },
        config.projectPolicyService || { provide: ProjectService, useClass: ProjectDefaultService },
        config.labelService || { provide: LabelService, useClass: LabelDefaultService },
        config.helmChartService || { provide: HelmChartService, useClass: HelmChartDefaultService },
        config.userPermissionService || { provide: UserPermissionService, useClass: UserPermissionDefaultService },
        config.gcApiRepository || {provide: GcApiRepository, useClass: GcApiDefaultRepository},
        ChannelService,
@ -1,23 +0,0 @@
import { Type } from '@angular/core';
import { HelmChartComponent } from './helm-chart.component';
import { ChartVersionComponent } from './versions/helm-chart-version.component';
import { ChartDetailComponent } from './chart-detail/chart-detail.component';
import { ChartDetailSummaryComponent } from './chart-detail/chart-detail-summary.component';
import { ChartDetailDependencyComponent } from './chart-detail/chart-detail-dependency.component';
import { ChartDetailValueComponent } from './chart-detail/chart-detail-value.component';

export * from "./helm-chart.component";
export * from "./versions/helm-chart-version.component";
export * from "./chart-detail/chart-detail.component";
export * from "./chart-detail/chart-detail-summary.component";
export * from "./chart-detail/chart-detail-dependency.component";
export * from "./chart-detail/chart-detail-value.component";

export const HELMCHART_DIRECTIVE: Type<any>[] = [
    HelmChartComponent,
    ChartVersionComponent,
    ChartDetailComponent,
    ChartDetailSummaryComponent,
    ChartDetailDependencyComponent,
    ChartDetailValueComponent,
];
@ -25,5 +25,4 @@ export * from './gridview/index';
export * from './repository-gridview/index';
export * from './operation/index';
export * from './_animations/index';
export * from './helm-chart/index';

@ -1,12 +1,8 @@
import { Type } from '@angular/core';
import { LabelComponent } from "./label.component";
import { LabelMarkerComponent } from './label-marker/label-marker.component';
import { LabelSignPostComponent } from './label-signpost/label-signpost.component';
import { LabelFilterComponent } from './label-filter/label-filter.component';

export const LABEL_DIRECTIVES: Type<any>[] = [
    LabelComponent,
    LabelMarkerComponent,
    LabelSignPostComponent,
    LabelFilterComponent
];
Some files were not shown because too many files have changed in this diff.