Refactor the prepare script based on the proposal https://github.com/goharbor/community/pull/22
Signed-off-by: Qian Deng <dengq@vmware.com>
parent 511a6dc9de
commit 5f80fe7b8a

Makefile (57 lines changed)
@@ -150,12 +150,13 @@ MIGRATEPATCHBINARYNAME=migrate-patch

# configfile
CONFIGPATH=$(MAKEPATH)
CONFIGFILE=harbor.cfg
INSIDE_CONFIGPATH=/harbor_make
CONFIGFILE=harbor.yml

# prepare parameters
PREPAREPATH=$(TOOLSPATH)
PREPARECMD=prepare
PREPARECMD_PARA=--conf $(CONFIGPATH)/$(CONFIGFILE)
PREPARECMD_PARA=--conf $(INSIDE_CONFIGPATH)/$(CONFIGFILE)
ifeq ($(NOTARYFLAG), true)
PREPARECMD_PARA+= --with-notary
endif

@@ -221,27 +222,21 @@ PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \
$(HARBORPKG)/common/templates $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
$(HARBORPKG)/prepare \
$(HARBORPKG)/LICENSE $(HARBORPKG)/install.sh \
$(HARBORPKG)/harbor.cfg $(HARBORPKG)/$(DOCKERCOMPOSEFILENAME)
$(HARBORPKG)/harbor.yml

PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
$(HARBORPKG)/common/templates $(HARBORPKG)/prepare \
$(HARBORPKG)/LICENSE \
$(HARBORPKG)/install.sh $(HARBORPKG)/$(DOCKERCOMPOSEFILENAME) \
$(HARBORPKG)/harbor.cfg
$(HARBORPKG)/install.sh \
$(HARBORPKG)/harbor.yml

DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)

ifeq ($(NOTARYFLAG), true)
DOCKERSAVE_PARA+= goharbor/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) goharbor/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
endif
ifeq ($(CLAIRFLAG), true)
DOCKERSAVE_PARA+= goharbor/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
endif
ifeq ($(MIGRATORFLAG), true)
DOCKERSAVE_PARA+= goharbor/harbor-migrator:$(MIGRATORVERSION)

@@ -249,9 +244,6 @@ endif
# append chartmuseum parameters if set
ifeq ($(CHARTFLAG), true)
DOCKERSAVE_PARA+= $(DOCKERIMAGENAME_CHART_SERVER):$(CHARTMUSEUMVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
endif

ui_version:

@@ -286,7 +278,7 @@ compile:check_environment compile_core compile_jobservice compile_registryctl co

prepare:
@echo "preparing..."
@$(MAKEPATH)/$(PREPARECMD) $(PREPARECMD_PARA)
@MAKEPATH=$(MAKEPATH) $(MAKEPATH)/$(PREPARECMD) $(PREPARECMD_PARA)

build:
make -f $(MAKEFILEPATH_PHOTON)/Makefile build -e DEVFLAG=$(DEVFLAG) \

@@ -295,30 +287,6 @@ build:
-e BUILDBIN=$(BUILDBIN) -e REDISVERSION=$(REDISVERSION) -e MIGRATORVERSION=$(MIGRATORVERSION) \
-e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER)

modify_composefile: modify_composefile_notary modify_composefile_clair modify_composefile_chartmuseum
@echo "preparing docker-compose file..."
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i -e 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i -e 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i -e 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i -e 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i -e 's/__redis_version__/$(REDISVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)

modify_composefile_notary:
@echo "preparing docker-compose notary file..."
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
@$(SEDCMD) -i -e 's/__notary_version__/$(NOTARYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)

modify_composefile_clair:
@echo "preparing docker-compose clair file..."
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
@$(SEDCMD) -i -e 's/__postgresql_version__/$(CLAIRDBVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
@$(SEDCMD) -i -e 's/__clair_version__/$(CLAIRVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
modify_composefile_chartmuseum:
@echo "preparing docker-compose chartmuseum file..."
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
@$(SEDCMD) -i -e 's/__chartmuseum_version__/$(CHARTMUSEUMVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)

modify_sourcefiles:
@echo "change mode of source files."
@chmod 600 $(MAKEPATH)/common/templates/notary/notary-signer.key

@@ -327,9 +295,10 @@ modify_sourcefiles:
@chmod 600 $(MAKEPATH)/common/templates/core/private_key.pem
@chmod 600 $(MAKEPATH)/common/templates/registry/root.crt

install: compile ui_version build modify_sourcefiles prepare modify_composefile start
# install: compile ui_version build modify_sourcefiles prepare start
install: compile ui_version build prepare start

package_online: modify_composefile
package_online: prepare
@echo "packing online package ..."
@cp -r make $(HARBORPKG)
@if [ -n "$(REGISTRYSERVER)" ] ; then \

@@ -342,7 +311,7 @@ package_online: modify_composefile
@rm -rf $(HARBORPKG)
@echo "Done."

package_offline: compile ui_version build modify_sourcefiles modify_composefile
package_offline: compile ui_version build
@echo "packing offline package ..."
@cp -r make $(HARBORPKG)
@cp LICENSE $(HARBORPKG)/LICENSE
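For reference, a minimal sketch of what the rewritten prepare target now runs, assuming the default values shown above (PREPARECMD=prepare, INSIDE_CONFIGPATH=/harbor_make, CONFIGFILE=harbor.yml) and assuming MAKEPATH points at the make/ directory (MAKEPATH itself is defined outside this hunk):

# Illustrative expansion of the new prepare recipe; --with-notary is appended only when NOTARYFLAG=true.
MAKEPATH=make make/prepare --conf /harbor_make/harbor.yml --with-notary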
make/common/templates/clair/clair_env.jinja (new file, 3 lines)

@@ -0,0 +1,3 @@
http_proxy={{http_proxy}}
https_proxy={{https_proxy}}
no_proxy={{no_proxy}}
make/common/templates/clair/config.yaml.jinja (new file, 25 lines)

@@ -0,0 +1,25 @@
clair:
  database:
    type: pgsql
    options:
      source: postgresql://{{username}}:{{password}}@{{host}}:{{port}}/{{dbname}}?sslmode=disable

      # Number of elements kept in the cache
      # Values unlikely to change (e.g. namespaces) are cached in order to save prevent needless roundtrips to the database.
      cachesize: 16384

  api:
    # API server port
    port: 6060
    healthport: 6061

    # Deadline before an API request will respond with a 503
    timeout: 300s
  updater:
    interval: {{interval}}h

  notifier:
    attempts: 3
    renotifyinterval: 2h
    http:
      endpoint: http://core:8080/service/notifications/clair

make/common/templates/clair/postgres_env.jinja (new file, 1 line)

@@ -0,0 +1 @@
POSTGRES_PASSWORD={{password}}
@ -1,42 +0,0 @@
|
||||
version: '2'
|
||||
services:
|
||||
core:
|
||||
networks:
|
||||
harbor-chartmuseum:
|
||||
aliases:
|
||||
- harbor-core
|
||||
redis:
|
||||
networks:
|
||||
harbor-chartmuseum:
|
||||
aliases:
|
||||
- redis
|
||||
chartmuseum:
|
||||
container_name: chartmuseum
|
||||
image: goharbor/chartmuseum-photon:__chartmuseum_version__
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- CHOWN
|
||||
- DAC_OVERRIDE
|
||||
- SETGID
|
||||
- SETUID
|
||||
networks:
|
||||
- harbor-chartmuseum
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- redis
|
||||
volumes:
|
||||
- /data/chart_storage:/chart_storage:z
|
||||
- ./common/config/chartserver:/etc/chartserver:z
|
||||
- ./common/config/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "chartmuseum"
|
||||
env_file:
|
||||
./common/config/chartserver/env
|
||||
networks:
|
||||
harbor-chartmuseum:
|
||||
external: false
|
@ -1,47 +0,0 @@
|
||||
version: '2'
|
||||
services:
|
||||
core:
|
||||
networks:
|
||||
harbor-clair:
|
||||
aliases:
|
||||
- harbor-core
|
||||
jobservice:
|
||||
networks:
|
||||
- harbor-clair
|
||||
registry:
|
||||
networks:
|
||||
- harbor-clair
|
||||
postgresql:
|
||||
networks:
|
||||
harbor-clair:
|
||||
aliases:
|
||||
- harbor-db
|
||||
clair:
|
||||
networks:
|
||||
- harbor-clair
|
||||
container_name: clair
|
||||
image: goharbor/clair-photon:__clair_version__
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- DAC_OVERRIDE
|
||||
- SETGID
|
||||
- SETUID
|
||||
cpu_quota: 50000
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- postgresql
|
||||
volumes:
|
||||
- ./common/config/clair/config.yaml:/etc/clair/config.yaml:z
|
||||
- ./common/config/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "clair"
|
||||
env_file:
|
||||
./common/config/clair/clair_env
|
||||
networks:
|
||||
harbor-clair:
|
||||
external: false
|
@ -1,69 +0,0 @@
|
||||
version: '2'
|
||||
services:
|
||||
core:
|
||||
networks:
|
||||
- harbor-notary
|
||||
proxy:
|
||||
networks:
|
||||
- harbor-notary
|
||||
postgresql:
|
||||
networks:
|
||||
harbor-notary:
|
||||
aliases:
|
||||
- harbor-db
|
||||
notary-server:
|
||||
image: goharbor/notary-server-photon:__notary_version__
|
||||
container_name: notary-server
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- SETGID
|
||||
- SETUID
|
||||
networks:
|
||||
- notary-sig
|
||||
- harbor-notary
|
||||
dns_search: .
|
||||
volumes:
|
||||
- ./common/config/notary:/etc/notary:z
|
||||
env_file:
|
||||
- ./common/config/notary/server_env
|
||||
depends_on:
|
||||
- postgresql
|
||||
- notary-signer
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "notary-server"
|
||||
notary-signer:
|
||||
image: goharbor/notary-signer-photon:__notary_version__
|
||||
container_name: notary-signer
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- SETGID
|
||||
- SETUID
|
||||
networks:
|
||||
harbor-notary:
|
||||
notary-sig:
|
||||
aliases:
|
||||
- notarysigner
|
||||
dns_search: .
|
||||
volumes:
|
||||
- ./common/config/notary:/etc/notary:z
|
||||
env_file:
|
||||
- ./common/config/notary/signer_env
|
||||
depends_on:
|
||||
- postgresql
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "notary-signer"
|
||||
networks:
|
||||
harbor-notary:
|
||||
external: false
|
||||
notary-sig:
|
||||
external: false
|
@ -1,47 +1,53 @@
|
||||
## Configuration file of Harbor
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version = 1.7.0
|
||||
_version: 1.7.0
|
||||
#The IP address or hostname to access admin UI and registry service.
|
||||
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
#DO NOT comment out this line, modify the value of "hostname" directly, or the installation will fail.
|
||||
hostname = reg.mydomain.com
|
||||
hostname: reg.mydomain.com
|
||||
|
||||
#The protocol for accessing the UI and token/notification service, by default it is http.
|
||||
#It can be set to https if ssl is enabled on nginx.
|
||||
ui_url_protocol = http
|
||||
ui_url_protocol: https
|
||||
|
||||
#Maximum number of job workers in job service
|
||||
max_job_workers = 10
|
||||
max_job_workers: 10
|
||||
|
||||
#Determine whether or not to generate certificate for the registry's token.
|
||||
#If the value is on, the prepare script creates new root cert and private key
|
||||
#for generating token to access the registry. If the value is off the default key/cert will be used.
|
||||
#This flag also controls the creation of the notary signer's cert.
|
||||
customize_crt = on
|
||||
customize_crt: on
|
||||
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
|
||||
#The path of cert and key files for nginx, they are applied only the protocol is set to https
|
||||
ssl_cert = /data/cert/server.crt
|
||||
ssl_cert_key = /data/cert/server.key
|
||||
ssl_cert: /data/cert/server.crt
|
||||
ssl_cert_key: /data/cert/server.key
|
||||
|
||||
#The path of secretkey storage
|
||||
secretkey_path = /data
|
||||
secretkey_path: /data
|
||||
|
||||
#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone
|
||||
admiral_url = NA
|
||||
admiral_url: NA
|
||||
|
||||
#Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
log_rotate_count = 50
|
||||
log_rotate_count: 50
|
||||
#Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
#If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
#are all valid.
|
||||
log_rotate_size = 200M
|
||||
log_rotate_size: 200M
|
||||
|
||||
# The directory that store log files
|
||||
log_location: /var/log/harbor
|
||||
|
||||
#Config http proxy for Clair, e.g. http://my.proxy.com:3128
|
||||
#Clair doesn't need to connect to harbor internal components via http proxy.
|
||||
http_proxy =
|
||||
https_proxy =
|
||||
no_proxy = 127.0.0.1,localhost,core,registry
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy: 127.0.0.1,localhost,core,registry
|
||||
|
||||
#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES
|
||||
#only take effect in the first boot, the subsequent changes of these properties
|
||||
@ -53,152 +59,152 @@ no_proxy = 127.0.0.1,localhost,core,registry
|
||||
|
||||
#Email server uses the given username and password to authenticate on TLS connections to host and act as identity.
|
||||
#Identity left blank to act as username.
|
||||
email_identity =
|
||||
email_identity:
|
||||
|
||||
email_server = smtp.mydomain.com
|
||||
email_server_port = 25
|
||||
email_username = sample_admin@mydomain.com
|
||||
email_password = abc
|
||||
email_from = admin <sample_admin@mydomain.com>
|
||||
email_ssl = false
|
||||
email_insecure = false
|
||||
email_server: smtp.mydomain.com
|
||||
email_server_port: 25
|
||||
email_username: sample_admin@mydomain.com
|
||||
email_password: abc
|
||||
email_from: admin <sample_admin@mydomain.com>
|
||||
email_ssl: false
|
||||
email_insecure: false
|
||||
|
||||
##The initial password of Harbor admin, only works for the first time when Harbor starts.
|
||||
#It has no effect after the first launch of Harbor.
|
||||
#Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password = Harbor12345
|
||||
harbor_admin_password: Harbor12345
|
||||
|
||||
##By default the auth mode is db_auth, i.e. the credentials are stored in a local database.
|
||||
#Set it to ldap_auth if you want to verify a user's credentials against an LDAP server.
|
||||
auth_mode = db_auth
|
||||
auth_mode: db_auth
|
||||
|
||||
#The url for an ldap endpoint.
|
||||
ldap_url = ldaps://ldap.mydomain.com
|
||||
ldap_url: ldaps://ldap.mydomain.com
|
||||
|
||||
#A user's DN who has the permission to search the LDAP/AD server.
|
||||
#If your LDAP/AD server does not support anonymous search, you should configure this DN and ldap_search_pwd.
|
||||
#ldap_searchdn = uid=searchuser,ou=people,dc=mydomain,dc=com
|
||||
#ldap_searchdn: uid=searchuser,ou=people,dc=mydomain,dc=com
|
||||
|
||||
#the password of the ldap_searchdn
|
||||
#ldap_search_pwd = password
|
||||
#ldap_search_pwd: password
|
||||
|
||||
#The base DN from which to look up a user in LDAP/AD
|
||||
ldap_basedn = ou=people,dc=mydomain,dc=com
|
||||
ldap_basedn: ou=people,dc=mydomain,dc=com
|
||||
|
||||
#Search filter for LDAP/AD, make sure the syntax of the filter is correct.
|
||||
#ldap_filter = (objectClass=person)
|
||||
|
||||
# The attribute used in a search to match a user, it could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD
|
||||
ldap_uid = uid
|
||||
ldap_uid: uid
|
||||
|
||||
#the scope to search for users, 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_scope = 2
|
||||
ldap_scope: 2
|
||||
|
||||
#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds.
|
||||
ldap_timeout = 5
|
||||
ldap_timeout: 5
|
||||
|
||||
#Verify certificate from LDAP server
|
||||
ldap_verify_cert = true
|
||||
ldap_verify_cert: true
|
||||
|
||||
#The base dn from which to lookup a group in LDAP/AD
|
||||
ldap_group_basedn = ou=group,dc=mydomain,dc=com
|
||||
ldap_group_basedn: ou=group,dc=mydomain,dc=com
|
||||
|
||||
#filter to search LDAP/AD group
|
||||
ldap_group_filter = objectclass=group
|
||||
ldap_group_filter: objectclass=group
|
||||
|
||||
#The attribute used to name a LDAP/AD group, it could be cn, name
|
||||
ldap_group_gid = cn
|
||||
ldap_group_gid: cn
|
||||
|
||||
#The scope to search for ldap groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_group_scope = 2
|
||||
ldap_group_scope: 2
|
||||
|
||||
#Turn on or off the self-registration feature
|
||||
self_registration = on
|
||||
self_registration: on
|
||||
|
||||
#The expiration time (in minute) of token created by token service, default is 30 minutes
|
||||
token_expiration = 30
|
||||
token_expiration: 30
|
||||
|
||||
#The flag to control what users have permission to create projects
|
||||
#The default value "everyone" allows everyone to creates a project.
|
||||
#Set to "adminonly" so that only admin user can create project.
|
||||
project_creation_restriction = everyone
|
||||
project_creation_restriction: everyone
|
||||
|
||||
#************************END INITIAL PROPERTIES************************
|
||||
|
||||
#######Harbor DB configuration section#######
|
||||
|
||||
#The address of the Harbor database. Only need to change when using external db.
|
||||
db_host = postgresql
|
||||
db_host: postgresql
|
||||
|
||||
#The password for the root user of Harbor DB. Change this before any production use.
|
||||
db_password = root123
|
||||
db_password: root123
|
||||
|
||||
#The port of Harbor database host
|
||||
db_port = 5432
|
||||
db_port: 5432
|
||||
|
||||
#The user name of Harbor database
|
||||
db_user = postgres
|
||||
db_user: postgres
|
||||
|
||||
##### End of Harbor DB configuration#######
|
||||
|
||||
##########Redis server configuration.############
|
||||
|
||||
#Redis connection address
|
||||
redis_host = redis
|
||||
redis_host: redis
|
||||
|
||||
#Redis connection port
|
||||
redis_port = 6379
|
||||
redis_port: 6379
|
||||
|
||||
#Redis connection password
|
||||
redis_password =
|
||||
redis_password:
|
||||
|
||||
#Redis connection db index
|
||||
#db_index 1,2,3 is for registry, jobservice and chartmuseum.
|
||||
#db_index 0 is for UI, it's unchangeable
|
||||
redis_db_index = 1,2,3
|
||||
redis_db_index: 1,2,3
|
||||
|
||||
########## End of Redis server configuration ############
|
||||
|
||||
##########Clair DB configuration############
|
||||
|
||||
#Clair DB host address. Only change it when using an exteral DB.
|
||||
clair_db_host = postgresql
|
||||
clair_db_host: postgresql
|
||||
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
|
||||
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
|
||||
clair_db_password = root123
|
||||
clair_db_password: root123
|
||||
#Clair DB connect port
|
||||
clair_db_port = 5432
|
||||
clair_db_port: 5432
|
||||
#Clair DB username
|
||||
clair_db_username = postgres
|
||||
clair_db_username: postgres
|
||||
#Clair default database
|
||||
clair_db = postgres
|
||||
clair_db: postgres
|
||||
|
||||
#The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
|
||||
clair_updaters_interval = 12
|
||||
clair_updaters_interval: 12
|
||||
|
||||
##########End of Clair DB configuration############
|
||||
|
||||
#The following attributes only need to be set when auth mode is uaa_auth
|
||||
uaa_endpoint = uaa.mydomain.org
|
||||
uaa_clientid = id
|
||||
uaa_clientsecret = secret
|
||||
uaa_verify_cert = true
|
||||
uaa_ca_cert = /path/to/ca.pem
|
||||
uaa_endpoint: uaa.mydomain.org
|
||||
uaa_clientid: id
|
||||
uaa_clientsecret: secret
|
||||
uaa_verify_cert: true
|
||||
uaa_ca_cert: /path/to/ca.pem
|
||||
|
||||
|
||||
### Harbor Storage settings ###
|
||||
#Please be aware that the following storage settings will be applied to both docker registry and helm chart repository.
|
||||
#registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
|
||||
registry_storage_provider_name = filesystem
|
||||
registry_storage_provider_name: filesystem
|
||||
#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
|
||||
#To avoid duplicated configurations, both docker registry and chart repository follow the same storage configuration specifications of docker registry.
|
||||
#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
|
||||
registry_storage_provider_config =
|
||||
registry_storage_provider_config:
|
||||
#registry_custom_ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
#of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
registry_custom_ca_bundle =
|
||||
registry_custom_ca_bundle:
|
||||
|
||||
#If reload_config=true, all settings which present in harbor.cfg take effect after prepare and restart harbor, it overwrites exsiting settings.
|
||||
#If reload_config=true, all settings which present in harbor.yml take effect after prepare and restart harbor, it overwrites exsiting settings.
|
||||
#reload_config=true
|
||||
#Regular expression to match skipped environment variables
|
||||
#skip_reload_env_pattern=(^EMAIL.*)|(^LDAP.*)
|
||||
#skip_reload_env_pattern: (^EMAIL.*)|(^LDAP.*)
|
@@ -179,30 +179,15 @@ fi
./prepare $prepare_para
echo ""

h2 "[Step $item]: checking existing instance of Harbor ..."; let item+=1
docker_compose_list='-f docker-compose.yml'
if [ $with_notary ]
then
docker_compose_list="${docker_compose_list} -f docker-compose.notary.yml"
fi
if [ $with_clair ]
then
docker_compose_list="${docker_compose_list} -f docker-compose.clair.yml"
fi
if [ $with_chartmuseum ]
then
docker_compose_list="${docker_compose_list} -f docker-compose.chartmuseum.yml"
fi

if [ -n "$(docker-compose $docker_compose_list ps -q)" ]
if [ -n "$(docker-compose ps -q)" ]
then
note "stopping existing Harbor instance ..."
docker-compose $docker_compose_list down -v
docker-compose down -v
fi
echo ""

h2 "[Step $item]: starting Harbor ..."
docker-compose $docker_compose_list up -d
docker-compose up -d

protocol=http
hostname=reg.mydomain.com
make/photon/prepare/Dockerfile (new file, 18 lines)

@@ -0,0 +1,18 @@
FROM photon:2.0

ENV LANG en_US.UTF-8

WORKDIR /usr/src/app

RUN mkdir -p /harbor_make

RUN tdnf install -y python3 \
    && tdnf install -y python3-pip
RUN pip3 install pipenv

COPY . /usr/src/app
RUN set -ex && pipenv install --deploy --system

ENTRYPOINT [ "python3", "main.py" ]

VOLUME ["/harbor_make"]
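The Dockerfile above packages the prepare tooling into an image; a hypothetical build-and-run could look like the sketch below. The tag goharbor/prepare:dev and the mounted host path are placeholders, not names taken from this commit.

# Build the prepare image from the new Dockerfile (tag is illustrative only).
docker build -t goharbor/prepare:dev make/photon/prepare
# ENTRYPOINT is "python3 main.py", so arguments after the image name go straight to the CLI;
# the configuration must be mounted under the declared /harbor_make volume.
docker run --rm -v "$(pwd)/make:/harbor_make" goharbor/prepare:dev --conf /harbor_make/harbor.yml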
make/photon/prepare/Pipfile (new file, 15 lines)

@@ -0,0 +1,15 @@
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"

[packages]
pyyaml = "*"
click = "*"
"jinja2" = "*"

[dev-packages]
pylint = "*"

[requires]
python_version = "3.6"
make/photon/prepare/Pipfile.lock (generated, new file, 192 lines)
@ -0,0 +1,192 @@
|
||||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "8950f4066b83c5eb792e0f828de1530b2a61d19e45531660adfc8e06a02f2e71"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
"python_version": "3.6"
|
||||
},
|
||||
"sources": [
|
||||
{
|
||||
"name": "pypi",
|
||||
"url": "https://pypi.org/simple",
|
||||
"verify_ssl": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"click": {
|
||||
"hashes": [
|
||||
"sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13",
|
||||
"sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==7.0"
|
||||
},
|
||||
"jinja2": {
|
||||
"hashes": [
|
||||
"sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd",
|
||||
"sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.10"
|
||||
},
|
||||
"markupsafe": {
|
||||
"hashes": [
|
||||
"sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432",
|
||||
"sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b",
|
||||
"sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9",
|
||||
"sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af",
|
||||
"sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834",
|
||||
"sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd",
|
||||
"sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d",
|
||||
"sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7",
|
||||
"sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b",
|
||||
"sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3",
|
||||
"sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c",
|
||||
"sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2",
|
||||
"sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7",
|
||||
"sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36",
|
||||
"sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1",
|
||||
"sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e",
|
||||
"sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1",
|
||||
"sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c",
|
||||
"sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856",
|
||||
"sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550",
|
||||
"sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492",
|
||||
"sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672",
|
||||
"sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401",
|
||||
"sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6",
|
||||
"sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6",
|
||||
"sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c",
|
||||
"sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd",
|
||||
"sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1"
|
||||
],
|
||||
"version": "==1.1.0"
|
||||
},
|
||||
"pyyaml": {
|
||||
"hashes": [
|
||||
"sha256:3d7da3009c0f3e783b2c873687652d83b1bbfd5c88e9813fb7e5b03c0dd3108b",
|
||||
"sha256:3ef3092145e9b70e3ddd2c7ad59bdd0252a94dfe3949721633e41344de00a6bf",
|
||||
"sha256:40c71b8e076d0550b2e6380bada1f1cd1017b882f7e16f09a65be98e017f211a",
|
||||
"sha256:558dd60b890ba8fd982e05941927a3911dc409a63dcb8b634feaa0cda69330d3",
|
||||
"sha256:a7c28b45d9f99102fa092bb213aa12e0aaf9a6a1f5e395d36166639c1f96c3a1",
|
||||
"sha256:aa7dd4a6a427aed7df6fb7f08a580d68d9b118d90310374716ae90b710280af1",
|
||||
"sha256:bc558586e6045763782014934bfaf39d48b8ae85a2713117d16c39864085c613",
|
||||
"sha256:d46d7982b62e0729ad0175a9bc7e10a566fc07b224d2c79fafb5e032727eaa04",
|
||||
"sha256:d5eef459e30b09f5a098b9cea68bebfeb268697f78d647bd255a085371ac7f3f",
|
||||
"sha256:e01d3203230e1786cd91ccfdc8f8454c8069c91bee3962ad93b87a4b2860f537",
|
||||
"sha256:e170a9e6fcfd19021dd29845af83bb79236068bf5fd4df3327c1be18182b2531"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.13"
|
||||
}
|
||||
},
|
||||
"develop": {
|
||||
"astroid": {
|
||||
"hashes": [
|
||||
"sha256:35b032003d6a863f5dcd7ec11abd5cd5893428beaa31ab164982403bcb311f22",
|
||||
"sha256:6a5d668d7dc69110de01cdf7aeec69a679ef486862a0850cc0fd5571505b6b7e"
|
||||
],
|
||||
"version": "==2.1.0"
|
||||
},
|
||||
"isort": {
|
||||
"hashes": [
|
||||
"sha256:1153601da39a25b14ddc54955dbbacbb6b2d19135386699e2ad58517953b34af",
|
||||
"sha256:b9c40e9750f3d77e6e4d441d8b0266cf555e7cdabdcff33c4fd06366ca761ef8",
|
||||
"sha256:ec9ef8f4a9bc6f71eec99e1806bfa2de401650d996c59330782b89a5555c1497"
|
||||
],
|
||||
"version": "==4.3.4"
|
||||
},
|
||||
"lazy-object-proxy": {
|
||||
"hashes": [
|
||||
"sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33",
|
||||
"sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39",
|
||||
"sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019",
|
||||
"sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088",
|
||||
"sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b",
|
||||
"sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e",
|
||||
"sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6",
|
||||
"sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b",
|
||||
"sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5",
|
||||
"sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff",
|
||||
"sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd",
|
||||
"sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7",
|
||||
"sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff",
|
||||
"sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d",
|
||||
"sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2",
|
||||
"sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35",
|
||||
"sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4",
|
||||
"sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514",
|
||||
"sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252",
|
||||
"sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109",
|
||||
"sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f",
|
||||
"sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c",
|
||||
"sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92",
|
||||
"sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577",
|
||||
"sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d",
|
||||
"sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d",
|
||||
"sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f",
|
||||
"sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a",
|
||||
"sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b"
|
||||
],
|
||||
"version": "==1.3.1"
|
||||
},
|
||||
"mccabe": {
|
||||
"hashes": [
|
||||
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
|
||||
"sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
|
||||
],
|
||||
"version": "==0.6.1"
|
||||
},
|
||||
"pylint": {
|
||||
"hashes": [
|
||||
"sha256:689de29ae747642ab230c6d37be2b969bf75663176658851f456619aacf27492",
|
||||
"sha256:771467c434d0d9f081741fec1d64dfb011ed26e65e12a28fe06ca2f61c4d556c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.2.2"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c",
|
||||
"sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"
|
||||
],
|
||||
"version": "==1.12.0"
|
||||
},
|
||||
"typed-ast": {
|
||||
"hashes": [
|
||||
"sha256:0555eca1671ebe09eb5f2176723826f6f44cca5060502fea259de9b0e893ab53",
|
||||
"sha256:0ca96128ea66163aea13911c9b4b661cb345eb729a20be15c034271360fc7474",
|
||||
"sha256:16ccd06d614cf81b96de42a37679af12526ea25a208bce3da2d9226f44563868",
|
||||
"sha256:1e21ae7b49a3f744958ffad1737dfbdb43e1137503ccc59f4e32c4ac33b0bd1c",
|
||||
"sha256:37670c6fd857b5eb68aa5d193e14098354783b5138de482afa401cc2644f5a7f",
|
||||
"sha256:46d84c8e3806619ece595aaf4f37743083f9454c9ea68a517f1daa05126daf1d",
|
||||
"sha256:5b972bbb3819ece283a67358103cc6671da3646397b06e7acea558444daf54b2",
|
||||
"sha256:6306ffa64922a7b58ee2e8d6f207813460ca5a90213b4a400c2e730375049246",
|
||||
"sha256:6cb25dc95078931ecbd6cbcc4178d1b8ae8f2b513ae9c3bd0b7f81c2191db4c6",
|
||||
"sha256:7e19d439fee23620dea6468d85bfe529b873dace39b7e5b0c82c7099681f8a22",
|
||||
"sha256:7f5cd83af6b3ca9757e1127d852f497d11c7b09b4716c355acfbebf783d028da",
|
||||
"sha256:81e885a713e06faeef37223a5b1167615db87f947ecc73f815b9d1bbd6b585be",
|
||||
"sha256:94af325c9fe354019a29f9016277c547ad5d8a2d98a02806f27a7436b2da6735",
|
||||
"sha256:b1e5445c6075f509d5764b84ce641a1535748801253b97f3b7ea9d948a22853a",
|
||||
"sha256:cb061a959fec9a514d243831c514b51ccb940b58a5ce572a4e209810f2507dcf",
|
||||
"sha256:cc8d0b703d573cbabe0d51c9d68ab68df42a81409e4ed6af45a04a95484b96a5",
|
||||
"sha256:da0afa955865920edb146926455ec49da20965389982f91e926389666f5cf86a",
|
||||
"sha256:dc76738331d61818ce0b90647aedde17bbba3d3f9e969d83c1d9087b4f978862",
|
||||
"sha256:e7ec9a1445d27dbd0446568035f7106fa899a36f55e52ade28020f7b3845180d",
|
||||
"sha256:f741ba03feb480061ab91a465d1a3ed2d40b52822ada5b4017770dfcb88f839f",
|
||||
"sha256:fe800a58547dd424cd286b7270b967b5b3316b993d86453ede184a17b5a6b17d"
|
||||
],
|
||||
"markers": "python_version < '3.7' and implementation_name == 'cpython'",
|
||||
"version": "==1.1.1"
|
||||
},
|
||||
"wrapt": {
|
||||
"hashes": [
|
||||
"sha256:e03f19f64d81d0a3099518ca26b04550026f131eced2e76ced7b85c6b8d32128"
|
||||
],
|
||||
"version": "==1.11.0"
|
||||
}
|
||||
}
|
||||
}
|
make/photon/prepare/__init__.py (new, empty file)
make/photon/prepare/g.py (new file, 20 lines)

@@ -0,0 +1,20 @@
import os

## Const
DEFAULT_UID = 10000
DEFAULT_GID = 10000

## Global variable
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = os.path.join(base_dir, "common/config")
config_file_path = os.path.join(base_dir, 'harbor.yml')

private_key_pem_template = os.path.join(templates_dir, "core", "private_key.pem")
root_cert_path_template = os.path.join(templates_dir, "registry", "root.crt")

cert_dir = os.path.join(config_dir, "nginx", "cert")
core_cert_dir = os.path.join(config_dir, "core", "certificates")
private_key_pem = os.path.join(config_dir, "core", "private_key.pem")
root_crt = os.path.join(config_dir, "registry", "root.crt")
registry_custom_ca_bundle_config = os.path.join(config_dir, "custom-ca-bundle.crt")
make/photon/prepare/main.py (new file, 69 lines)

@@ -0,0 +1,69 @@
import click

from utils.admin_server import prepare_adminserver
from utils.misc import delfile
from utils.configs import validate, parse_yaml_config
from utils.cert import prepare_ca, SSL_CERT_KEY_PATH, SSL_CERT_PATH, get_secret_key
from utils.db import prepare_db
from utils.jobservice import prepare_job_service
from utils.registry import prepare_registry
from utils.registry_ctl import prepare_registry_ctl
from utils.core import prepare_core
from utils.uaa import prepare_uaa_cert_file
from utils.notary import prepare_notary
from utils.log import prepare_log_configs
from utils.clair import prepare_clair
from utils.chart import prepare_chartmuseum
from utils.docker_compose import prepare_docker_compose
from utils.nginx import prepare_nginx, nginx_confd_dir
from g import (config_dir, private_key_pem_template, config_file_path, core_cert_dir, private_key_pem,
               root_crt, root_cert_path_template, registry_custom_ca_bundle_config)

# Main function
@click.command()
@click.option('--conf', default=config_file_path, help="the path of Harbor configuration file")
@click.option('--with-notary', is_flag=True, help="the Harbor instance is to be deployed with notary")
@click.option('--with-clair', is_flag=True, help="the Harbor instance is to be deployed with clair")
@click.option('--with-chartmuseum', is_flag=True, help="the Harbor instance is to be deployed with chart repository supporting")
def main(conf, with_notary, with_clair, with_chartmuseum):

    delfile(config_dir)
    config_dict = parse_yaml_config(conf)
    validate(config_dict, notary_mode=with_notary)

    prepare_log_configs(config_dict)
    prepare_nginx(config_dict)
    prepare_adminserver(config_dict, with_notary=with_notary, with_clair=with_clair, with_chartmuseum=with_chartmuseum)
    prepare_core(config_dict)
    prepare_registry(config_dict)
    prepare_registry_ctl(config_dict)
    prepare_db(config_dict)
    prepare_job_service(config_dict)

    get_secret_key(config_dict['secretkey_path'])
    if config_dict['auth_mode'] == "uaa_auth":
        prepare_uaa_cert_file(config_dict['uaa_ca_cert'], core_cert_dir)

    # If Customized cert enabled
    prepare_ca(
        customize_crt=config_dict['customize_crt'],
        private_key_pem_path=private_key_pem,
        private_key_pem_template=private_key_pem_template,
        root_crt_path=root_crt,
        root_cert_template_path=root_cert_path_template,
        registry_custom_ca_bundle_path=config_dict['registry_custom_ca_bundle_path'],
        registry_custom_ca_bundle_config=registry_custom_ca_bundle_config)

    if with_notary:
        prepare_notary(config_dict, nginx_confd_dir, SSL_CERT_PATH, SSL_CERT_KEY_PATH)

    if with_clair:
        prepare_clair(config_dict)

    if with_chartmuseum:
        prepare_chartmuseum(config_dict)

    prepare_docker_compose(config_dict, with_clair, with_notary, with_chartmuseum)

if __name__ == '__main__':
    main()
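As a usage sketch (not part of the commit), the click-based entry point above can be driven directly like this, with paths following the /harbor_make layout defined in g.py:

# All three --with-* flags are optional; each one enables config generation for the corresponding component.
python3 main.py --conf /harbor_make/harbor.yml --with-notary --with-clair --with-chartmuseum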
make/photon/prepare/templates/adminserver/env.jinja (new file, 68 lines)

@@ -0,0 +1,68 @@
PORT=8080
LOG_LEVEL=info
EXT_ENDPOINT={{public_url}}
AUTH_MODE={{auth_mode}}
SELF_REGISTRATION={{self_registration}}
LDAP_URL={{ldap_url}}
LDAP_SEARCH_DN={{ldap_searchdn}}
LDAP_SEARCH_PWD={{ldap_search_pwd}}
LDAP_BASE_DN={{ldap_basedn}}
LDAP_FILTER={{ldap_filter}}
LDAP_UID={{ldap_uid}}
LDAP_SCOPE={{ldap_scope}}
LDAP_TIMEOUT={{ldap_timeout}}
LDAP_VERIFY_CERT={{ldap_verify_cert}}
DATABASE_TYPE=postgresql
POSTGRESQL_HOST={{db_host}}
POSTGRESQL_PORT={{db_port}}
POSTGRESQL_USERNAME={{db_user}}
POSTGRESQL_PASSWORD={{db_password}}
POSTGRESQL_DATABASE=registry
POSTGRESQL_SSLMODE=disable
LDAP_GROUP_BASEDN={{ldap_group_basedn}}
LDAP_GROUP_FILTER={{ldap_group_filter}}
LDAP_GROUP_GID={{ldap_group_gid}}
LDAP_GROUP_SCOPE={{ldap_group_scope}}
REGISTRY_URL={{registry_url}}
TOKEN_SERVICE_URL={{token_service_url}}
EMAIL_HOST={{email_host}}
EMAIL_PORT={{email_port}}
EMAIL_USR={{email_usr}}
EMAIL_PWD={{email_pwd}}
EMAIL_SSL={{email_ssl}}
EMAIL_FROM={{email_from}}
EMAIL_IDENTITY={{email_identity}}
EMAIL_INSECURE={{email_insecure}}
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
PROJECT_CREATION_RESTRICTION={{project_creation_restriction}}
MAX_JOB_WORKERS={{max_job_workers}}
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
TOKEN_EXPIRATION={{token_expiration}}
CFG_EXPIRATION=5
ADMIRAL_URL={{admiral_url}}
WITH_NOTARY={{with_notary}}
WITH_CLAIR={{with_clair}}
CLAIR_DB_PASSWORD={{clair_db_password}}
CLAIR_DB_HOST={{clair_db_host}}
CLAIR_DB_PORT={{clair_db_port}}
CLAIR_DB_USERNAME={{clair_db_username}}
CLAIR_DB={{clair_db}}
CLAIR_DB_SSLMODE=disable
RESET={{reload_config}}
UAA_ENDPOINT={{uaa_endpoint}}
UAA_CLIENTID={{uaa_clientid}}
UAA_CLIENTSECRET={{uaa_clientsecret}}
UAA_VERIFY_CERT={{uaa_verify_cert}}
CORE_URL={{core_url}}
JOBSERVICE_URL={{jobservice_url}}
CLAIR_URL={{clair_url}}
NOTARY_URL={{notary_url}}
REGISTRY_STORAGE_PROVIDER_NAME={{storage_provider_name}}
READ_ONLY=false
SKIP_RELOAD_ENV_PATTERN={{skip_reload_env_pattern}}
RELOAD_KEY={{reload_key}}
CHART_REPOSITORY_URL={{chart_repository_url}}
LDAP_GROUP_ADMIN_DN={{ldap_group_admin_dn}}
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
WITH_CHARTMUSEUM={{with_chartmuseum}}
make/photon/prepare/templates/chartserver/env.jinja (new file, 41 lines)

@@ -0,0 +1,41 @@
## Settings should be set
PORT=9999

# Only support redis now. If redis is setup, then enable cache
CACHE={{cache_store}}
CACHE_REDIS_ADDR={{cache_redis_addr}}
CACHE_REDIS_PASSWORD={{cache_redis_password}}
CACHE_REDIS_DB={{cache_redis_db_index}}

# Credential for internal communication
BASIC_AUTH_USER=chart_controller
BASIC_AUTH_PASS={{core_secret}}

# Multiple tenants
# Must be set with 1 to support project namespace
DEPTH=1

# Backend storage driver: e.g. "local", "amazon", "google" etc.
STORAGE={{storage_driver}}

# Storage driver settings
{{all_storage_driver_configs}}

## Settings with default values. Just put here for future changes
DEBUG=false
LOG_JSON=true
DISABLE_METRICS=false
DISABLE_API=false
DISABLE_STATEFILES=false
ALLOW_OVERWRITE=true
CHART_URL=
AUTH_ANONYMOUS_GET=false
TLS_CERT=
TLS_KEY=
CONTEXT_PATH=
INDEX_LIMIT=0
MAX_STORAGE_OBJECTS=0
MAX_UPLOAD_SIZE=20971520
CHART_POST_FORM_FIELD_NAME=chart
PROV_POST_FORM_FIELD_NAME=prov
make/photon/prepare/templates/clair/clair_env.jinja (new file, 3 lines)

@@ -0,0 +1,3 @@
http_proxy={{http_proxy}}
https_proxy={{https_proxy}}
no_proxy={{no_proxy}}

make/photon/prepare/templates/clair/config.yaml.jinja (new file, 25 lines)

@@ -0,0 +1,25 @@
clair:
  database:
    type: pgsql
    options:
      source: postgresql://{{username}}:{{password}}@{{host}}:{{port}}/{{dbname}}?sslmode=disable

      # Number of elements kept in the cache
      # Values unlikely to change (e.g. namespaces) are cached in order to save prevent needless roundtrips to the database.
      cachesize: 16384

  api:
    # API server port
    port: 6060
    healthport: 6061

    # Deadline before an API request will respond with a 503
    timeout: 300s
  updater:
    interval: {{interval}}h

  notifier:
    attempts: 3
    renotifyinterval: 2h
    http:
      endpoint: http://core:8080/service/notifications/clair

make/photon/prepare/templates/clair/postgres_env.jinja (new file, 1 line)

@@ -0,0 +1 @@
POSTGRES_PASSWORD={{password}}
@@ -0,0 +1,7 @@
This folder used to run some initial sql for clair if needed.

Just put the sql file in this directory and then start the
clair .

both .sql and .gz format supported
make/photon/prepare/templates/core/app.conf.jinja (new file, 6 lines)

@@ -0,0 +1,6 @@
appname = Harbor
runmode = dev
enablegzip = true

[dev]
httpport = 8080
make/photon/prepare/templates/core/env.jinja (new file, 10 lines)

@@ -0,0 +1,10 @@
LOG_LEVEL=info
CONFIG_PATH=/etc/core/app.conf
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
ADMINSERVER_URL={{adminserver_url}}
UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem
_REDIS_URL={{redis_host}}:{{redis_port}},100,{{redis_password}}
SYNC_REGISTRY=false
CHART_CACHE_DRIVER={{chart_cache_driver}}
_REDIS_URL_REG={{redis_url_reg}}
make/photon/prepare/templates/core/private_key.pem (new file, 51 lines)
@ -0,0 +1,51 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIJKAIBAAKCAgEAtpMvyv153iSmwm6TrFpUOzsIGBEDbGtOOEZMEm08D8IC2n1G
|
||||
d6/XOZ5FxPAD6gIpE0EAcMojY5O0Hl4CDoyV3e/iKcBqFOgYtpogNtan7yT5J8gw
|
||||
KsPbU/8nBkK75GOq56nfvq4t9GVAclIDtHbuvmlh6O2n+fxtR0M9LbuotbSBdXYU
|
||||
hzXqiSsMclBvLyIk/z327VP5l0nUNOzPuKIwQjuxYKDkvq1oGy98oVlE6wl0ldh2
|
||||
ZYZLGAYbVhqBVUT1Un/PYqi9Nofa2RI5n1WOkUJQp87vb+PUPFhVOdvH/oAzV6/b
|
||||
9dzyhA5paDM06lj2gsg9hQWxCgbFh1x39c6pSI8hmVe6x2d4tAtSyOm3Qwz+zO2l
|
||||
bPDvkY8Svh5nxUYObrNreoO8wHr8MC6TGUQLnUt/RfdVKe5fYPFl6VYqJP/L3LDn
|
||||
Xj771nFq6PKiYbhBwJw3TM49gpKNS/Of70TP2m7nVlyuyMdE5T1j3xyXNkixXqqn
|
||||
JuSMqX/3Bmm0On9KEbemwn7KRYF/bqc50+RcGUdKNcOkN6vuMVZei4GbxALnVqac
|
||||
s+/UQAiQP4212UO7iZFwMaCNJ3r/b4GOlyalI1yEA4odoZov7k5zVOzHu8O6QmCj
|
||||
3R5TVOudpGiUh+lumRRpNqxDgjngLljvaWU6ttyIbjnAwCjnJoppZM2lkRkCAwEA
|
||||
AQKCAgAvsvCPlf2a3fR7Y6xNISRUfS22K+u7DaXX6fXB8qv4afWY45Xfex89vG35
|
||||
78L2Bi55C0h0LztjrpkmPeVHq88TtrJduhl88M5UFpxH93jUb9JwZErBQX4xyb2G
|
||||
UzUHjEqAT89W3+a9rR5TP74cDd59/MZJtp1mIF7keVqochi3sDsKVxkx4hIuWALe
|
||||
csk5hTApRyUWCBRzRCSe1yfF0wnMpA/JcP+SGXfTcmqbNNlelo/Q/kaga59+3UmT
|
||||
C0Wy41s8fIvP+MnGT2QLxkkrqYyfwrWTweqoTtuKEIHjpdnwUcoYJKfQ6jKp8aH0
|
||||
STyP5UIyFOKNuFjyh6ZfoPbuT1nGW+YKlUnK4hQ9N/GE0oMoecTaHTbqM+psQvbj
|
||||
6+CG/1ukA5ZTQyogNyuOApArFBQ+RRmVudPKA3JYygIhwctuB2oItsVEOEZMELCn
|
||||
g2aVFAVXGfGRDXvpa8oxs3Pc6RJEp/3tON6+w7cMCx0lwN/Jk2Ie6RgTzUycT3k6
|
||||
MoTQJRoO6/ZHcx3hTut/CfnrWiltyAUZOsefLuLg+Pwf9GHhOycLRI6gHfgSwdIV
|
||||
S77UbbELWdscVr1EoPIasUm1uYWBBcFRTturRW+GHJ8TZX+mcWSBcWwBhp15LjEl
|
||||
tJf+9U6lWMOSB2LvT+vFmR0M9q56fo7UeKFIR7mo7/GpiVu5AQKCAQEA6Qs7G9mw
|
||||
N/JZOSeQO6xIQakC+sKApPyXO58fa7WQzri+l2UrLNp0DEQfZCujqDgwys6OOzR/
|
||||
xg8ZKQWVoad08Ind3ZwoJgnLn6QLENOcE6PpWxA/JjnVGP4JrXCYR98cP0sf9jEI
|
||||
xkR1qT50GbeqU3RDFliI4kGRvbZ8cekzuWppfQcjstSBPdvuxqAcUVmTnTw83nvD
|
||||
FmBbhlLiEgI3iKtJ97UB7480ivnWnOuusduk7FO4jF3hkrOa+YRidinTCi8JBo0Y
|
||||
jx4Ci3Y5x6nvwkXhKzXapd7YmPNisUc5xA7/a+W71cyC0IKUwRc/8pYWLL3R3CpR
|
||||
YiV8gf6gwzOckQKCAQEAyI9CSNoAQH4zpS8B9PF8zILqEEuun8m1f5JB3hQnfWzm
|
||||
7uz/zg6I0TkcCE0AJVSKPHQm1V9+TRbF9+DiOWHEYYzPmK8h63SIufaWxZPqai4E
|
||||
PUj6eQWykBUVJ96n6/AW0JHRZ+WrJ5RXBqCLuY7NP6wDhORrCJjBwaGMohNpbKPS
|
||||
H3QewsoxCh+CEXKdKyy+/yU/f4E89PlHapkW1/bDJ5u7puSD+KvmiDDIXSBncdOO
|
||||
uFT8n+XH5IwgjdXFSDim15rQ8jD2l2xLcwKboTpx5GeRl8oB1VGm0fUbBn1dvGPG
|
||||
4WfHGyrp9VNZtP160WoHr+vRVPqvHNkoeAlCfEwQCQKCAQBN1dtzLN0HgqE8TrOE
|
||||
ysEDdTCykj4nXNoiJr522hi4gsndhQPLolb6NdKKQW0S5Vmekyi8K4e1nhtYMS5N
|
||||
5MFRCasZtmtOcR0af87WWucZRDjPmniNCunaxBZ1YFLsRl+H4E6Xir8UgY8O7PYY
|
||||
FNkFsKIrl3x4nU/RHl8oKKyG9Dyxbq4Er6dPAuMYYiezIAkGjjUCVjHNindnQM2T
|
||||
GDx2IEe/PSydV6ZD+LguhyU88FCAQmI0N7L8rZJIXmgIcWW0VAterceTHYHaFK2t
|
||||
u1uB9pcDOKSDnA+Z3kiLT2/CxQOYhQ2clgbnH4YRi/Nm0awsW2X5dATklAKm5GXL
|
||||
bLSRAoIBAQClaNnPQdTBXBR2IN3pSZ2XAkXPKMwdxvtk+phOc6raHA4eceLL7FrU
|
||||
y9gd1HvRTfcwws8gXcDKDYU62gNaNhMELWEt2QsNqS/2x7Qzwbms1sTyUpUZaSSL
|
||||
BohLOKyfv4ThgdIGcXoGi6Z2tcRnRqpq4BCK8uR/05TBgN5+8amaS0ZKYLfaCW4G
|
||||
nlPk1fVgHWhtAChtnYZLuKg494fKmB7+NMfAbmmVlxjrq+gkPkxyqXvk9Vrg+V8y
|
||||
VIuozu0Fkouv+GRpyw4ldtCHS1hV0eEK8ow2dwmqCMygDxm58X10mYn2b2PcOTl5
|
||||
9sNerUw1GNC8O66K+rGgBk4FKgXmg8kZAoIBABBcuisK250fXAfjAWXGqIMs2+Di
|
||||
vqAdT041SNZEOJSGNFsLJbhd/3TtCLf29PN/YXtnvBmC37rqryTsqjSbx/YT2Jbr
|
||||
Bk3jOr9JVbmcoSubXl8d/uzf7IGs91qaCgBwPZHgeH+kK13FCLexz+U9zYMZ78fF
|
||||
/yO82CpoekT+rcl1jzYn43b6gIklHABQU1uCD6MMyMhJ9Op2WmbDk3X+py359jMc
|
||||
+Cr2zfzdHAIVff2dOV3OL+ZHEWbwtnn3htKUdOmjoTJrciFx0xNZJS5Q7QYHMONj
|
||||
yPqbajyhopiN01aBQpCSGF1F1uRpWeIjTrAZPbrwLl9YSYXz0AT05QeFEFk=
|
||||
-----END RSA PRIVATE KEY-----
|
make/photon/prepare/templates/db/env.jinja (new file, 1 line)

@@ -0,0 +1 @@
POSTGRES_PASSWORD={{db_password}}
@ -0,0 +1,349 @@
|
||||
version: '2'
|
||||
services:
|
||||
log:
|
||||
image: goharbor/harbor-log:{{version}}
|
||||
container_name: harbor-log
|
||||
restart: always
|
||||
dns_search: .
|
||||
volumes:
|
||||
- {{log_location}}/:/var/log/docker/:z
|
||||
- ./common/config/log/:/etc/logrotate.d/:z
|
||||
ports:
|
||||
- 127.0.0.1:1514:10514
|
||||
networks:
|
||||
- harbor
|
||||
registry:
|
||||
image: goharbor/registry-photon:{{reg_version}}
|
||||
container_name: registry
|
||||
restart: always
|
||||
volumes:
|
||||
- {{data_volume}}/registry:/storage:z
|
||||
- ./common/config/registry/:/etc/registry/:z
|
||||
- ./common/config/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
|
||||
networks:
|
||||
- harbor
|
||||
{% if with_clair %}
|
||||
- harbor-clair
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "registry"
|
||||
registryctl:
|
||||
image: goharbor/harbor-registryctl:{{version}}
|
||||
container_name: registryctl
|
||||
env_file:
|
||||
- ./common/config/registryctl/env
|
||||
restart: always
|
||||
volumes:
|
||||
- {{data_volume}}/registry:/storage:z
|
||||
- ./common/config/registry/:/etc/registry/:z
|
||||
- ./common/config/registryctl/config.yml:/etc/registryctl/config.yml:z
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "registryctl"
|
||||
postgresql:
|
||||
image: goharbor/harbor-db:{{version}}
|
||||
container_name: harbor-db
|
||||
restart: always
|
||||
volumes:
|
||||
- {{data_volume}}/database:/var/lib/postgresql/data:z
|
||||
networks:
|
||||
harbor:
|
||||
{% if with_notary %}
|
||||
harbor-notary:
|
||||
aliases:
|
||||
- harbor-db
|
||||
{% endif %}
|
||||
{% if with_clair %}
|
||||
harbor-clair:
|
||||
aliases:
|
||||
- harbor-db
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
env_file:
|
||||
- ./common/config/db/env
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "postgresql"
|
||||
adminserver:
|
||||
image: goharbor/harbor-adminserver:{{version}}
|
||||
container_name: harbor-adminserver
|
||||
env_file:
|
||||
- ./common/config/adminserver/env
|
||||
restart: always
|
||||
volumes:
|
||||
- {{data_volume}}/config/:/etc/adminserver/config/:z
|
||||
- {{secretkey_path}}/secretkey:/etc/adminserver/key:z
|
||||
- {{data_volume}}/:/data/:z
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "adminserver"
|
||||
core:
|
||||
image: goharbor/harbor-core:{{version}}
|
||||
container_name: harbor-core
|
||||
env_file:
|
||||
- ./common/config/core/env
|
||||
restart: always
|
||||
volumes:
|
||||
- ./common/config/core/app.conf:/etc/core/app.conf:z
|
||||
- ./common/config/core/private_key.pem:/etc/core/private_key.pem:z
|
||||
- ./common/config/core/certificates/:/etc/core/certificates/:z
|
||||
- {{secretkey_path}}/secretkey:/etc/core/key:z
|
||||
- {{data_volume}}/ca_download/:/etc/core/ca/:z
|
||||
- {{data_volume}}/psc/:/etc/core/token/:z
|
||||
- {{data_volume}}/:/data/:z
|
||||
networks:
|
||||
harbor:
|
||||
{% if with_notary %}
|
||||
harbor-notary:
|
||||
{% endif %}
|
||||
{% if with_clair %}
|
||||
harbor-clair:
|
||||
aliases:
|
||||
- harbor-core
|
||||
{% endif %}
|
||||
{% if with_chartmuseum %}
|
||||
harbor-chartmuseum:
|
||||
aliases:
|
||||
- harbor-core
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
- adminserver
|
||||
- registry
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "core"
|
||||
portal:
|
||||
image: goharbor/harbor-portal:{{version}}
|
||||
container_name: harbor-portal
|
||||
restart: always
|
||||
networks:
|
||||
- harbor
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
- core
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "portal"
|
||||
|
||||
jobservice:
|
||||
image: goharbor/harbor-jobservice:{{version}}
|
||||
container_name: harbor-jobservice
|
||||
env_file:
|
||||
- ./common/config/jobservice/env
|
||||
restart: always
|
||||
volumes:
|
||||
- {{data_volume}}/job_logs:/var/log/jobs:z
|
||||
- ./common/config/jobservice/config.yml:/etc/jobservice/config.yml:z
|
||||
networks:
|
||||
- harbor
|
||||
{% if with_clair %}
|
||||
- harbor-clair
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- redis
|
||||
- core
|
||||
- adminserver
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "jobservice"
|
||||
redis:
|
||||
image: goharbor/redis-photon:{{redis_version}}
|
||||
container_name: redis
|
||||
restart: always
|
||||
volumes:
|
||||
- {{data_volume}}/redis:/var/lib/redis
|
||||
networks:
|
||||
harbor:
|
||||
{% if with_chartmuseum %}
|
||||
harbor-chartmuseum:
|
||||
aliases:
|
||||
- redis
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- log
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "redis"
|
||||
proxy:
|
||||
image: goharbor/nginx-photon:{{redis_version}}
|
||||
container_name: nginx
|
||||
restart: always
|
||||
volumes:
|
||||
- ./common/config/nginx:/etc/nginx:z
|
||||
- {{cert_key_path}}:/etc/nginx/cert/server.key
|
||||
- {{cert_path}}:/etc/nginx/cert/server.crt
|
||||
networks:
|
||||
- harbor
|
||||
{% if with_notary %}
|
||||
- harbor-notary
|
||||
{% endif %}
|
||||
dns_search: .
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
- 4443:4443
|
||||
depends_on:
|
||||
- postgresql
|
||||
- registry
|
||||
- core
|
||||
- portal
|
||||
- log
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "proxy"
|
||||
{% if with_notary %}
|
||||
notary-server:
|
||||
image: goharbor/notary-server-photon:{{notary_version}}
|
||||
container_name: notary-server
|
||||
restart: always
|
||||
networks:
|
||||
- notary-sig
|
||||
- harbor-notary
|
||||
dns_search: .
|
||||
volumes:
|
||||
- ./common/config/notary:/etc/notary:z
|
||||
env_file:
|
||||
- ./common/config/notary/server_env
|
||||
depends_on:
|
||||
- postgresql
|
||||
- notary-signer
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "notary-server"
|
||||
notary-signer:
|
||||
image: goharbor/notary-signer-photon:{{notary_version}}
|
||||
container_name: notary-signer
|
||||
restart: always
|
||||
networks:
|
||||
harbor-notary:
|
||||
notary-sig:
|
||||
aliases:
|
||||
- notarysigner
|
||||
dns_search: .
|
||||
volumes:
|
||||
- ./common/config/notary:/etc/notary:z
|
||||
env_file:
|
||||
- ./common/config/notary/signer_env
|
||||
depends_on:
|
||||
- postgresql
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "notary-signer"
|
||||
{% endif %}
|
||||
{% if with_clair %}
|
||||
clair:
|
||||
networks:
|
||||
- harbor-clair
|
||||
container_name: clair
|
||||
image: goharbor/clair-photon:{{clair_version}}
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- DAC_OVERRIDE
|
||||
- SETGID
|
||||
- SETUID
|
||||
cpu_quota: 50000
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- postgresql
|
||||
volumes:
|
||||
- ./common/config/clair/config.yaml:/etc/clair/config.yaml:z
|
||||
- ./common/config/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "clair"
|
||||
env_file:
|
||||
./common/config/clair/clair_env
|
||||
{% endif %}
|
||||
{% if with_chartmuseum %}
|
||||
chartmuseum:
|
||||
container_name: chartmuseum
|
||||
image: goharbor/chartmuseum-photon:{{chartmuseum_version}}
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- CHOWN
|
||||
- DAC_OVERRIDE
|
||||
- SETGID
|
||||
- SETUID
|
||||
networks:
|
||||
- harbor-chartmuseum
|
||||
dns_search: .
|
||||
depends_on:
|
||||
- redis
|
||||
volumes:
|
||||
- {{data_volume}}/chart_storage:/chart_storage:z
|
||||
- ./common/config/chartserver:/etc/chartserver:z
|
||||
- ./common/config/custom-ca-bundle.crt:/harbor_cust_cert/custom-ca-bundle.crt:z
|
||||
logging:
|
||||
driver: "syslog"
|
||||
options:
|
||||
syslog-address: "tcp://127.0.0.1:1514"
|
||||
tag: "chartmuseum"
|
||||
env_file:
|
||||
./common/config/chartserver/env
|
||||
{% endif %}
|
||||
networks:
|
||||
harbor:
|
||||
external: false
|
||||
{% if with_notary %}
|
||||
harbor-notary:
|
||||
external: false
|
||||
notary-sig:
|
||||
external: false
|
||||
{% endif %}
|
||||
{% if with_clair %}
|
||||
harbor-clair:
|
||||
external: false
|
||||
{% endif %}
|
||||
{% if with_chartmuseum %}
|
||||
harbor-chartmuseum:
|
||||
external: false
|
||||
{% endif %}
|
41
make/photon/prepare/templates/jobservice/config.yml.jinja
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
#Protocol used to serve
|
||||
protocol: "http"
|
||||
|
||||
#Configure the certificate and key below if the 'https' protocol is used
|
||||
#https_config:
|
||||
# cert: "server.crt"
|
||||
# key: "server.key"
|
||||
|
||||
#Server listening port
|
||||
port: 8080
|
||||
|
||||
#Worker pool
|
||||
worker_pool:
|
||||
#Worker concurrency
|
||||
workers: {{max_job_workers}}
|
||||
backend: "redis"
|
||||
#Additional config when the 'redis' backend is used
|
||||
redis_pool:
|
||||
#redis://[arbitrary_username:password@]ipaddress:port/database_index
|
||||
redis_url: {{redis_url}}
|
||||
namespace: "harbor_job_service_namespace"
|
||||
#Loggers for the running job
|
||||
job_loggers:
|
||||
- name: "STD_OUTPUT" # logger backend name, only support "FILE" and "STD_OUTPUT"
|
||||
level: "INFO" # INFO/DEBUG/WARNING/ERROR/FATAL
|
||||
- name: "FILE"
|
||||
level: "INFO"
|
||||
settings: # Customized settings of logger
|
||||
base_dir: "/var/log/jobs"
|
||||
sweeper:
|
||||
duration: 1 #days
|
||||
settings: # Customized settings of sweeper
|
||||
work_dir: "/var/log/jobs"
|
||||
|
||||
#Loggers for the job service
|
||||
loggers:
|
||||
- name: "STD_OUTPUT" # Same with above
|
||||
level: "INFO"
|
||||
#Admin server endpoint
|
||||
admin_server: "http://adminserver:8080/"
|
3
make/photon/prepare/templates/jobservice/env.jinja
Normal file
@ -0,0 +1,3 @@
|
||||
CORE_SECRET={{core_secret}}
|
||||
JOBSERVICE_SECRET={{jobservice_secret}}
|
||||
CORE_URL={{core_url}}
|
8
make/photon/prepare/templates/log/logrotate.conf.jinja
Normal file
@ -0,0 +1,8 @@
|
||||
/var/log/docker/*.log {
|
||||
rotate {{log_rotate_count}}
|
||||
size {{log_rotate_size}}
|
||||
copytruncate
|
||||
compress
|
||||
missingok
|
||||
nodateext
|
||||
}
|
124
make/photon/prepare/templates/nginx/nginx.http.conf.jinja
Normal file
@ -0,0 +1,124 @@
|
||||
worker_processes auto;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
use epoll;
|
||||
multi_accept on;
|
||||
}
|
||||
|
||||
http {
|
||||
tcp_nodelay on;
|
||||
|
||||
# this is necessary for us to be able to disable request buffering in all cases
|
||||
proxy_http_version 1.1;
|
||||
|
||||
upstream core {
|
||||
server core:8080;
|
||||
}
|
||||
|
||||
upstream portal {
|
||||
server portal:80;
|
||||
}
|
||||
|
||||
log_format timed_combined '$remote_addr - '
|
||||
'"$request" $status $body_bytes_sent '
|
||||
'"$http_referer" "$http_user_agent" '
|
||||
'$request_time $upstream_response_time $pipe';
|
||||
|
||||
access_log /dev/stdout timed_combined;
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_tokens off;
|
||||
# disable any limits to avoid HTTP 413 for large image uploads
|
||||
client_max_body_size 0;
|
||||
|
||||
# customized location config files can be placed in /etc/nginx/conf.d with prefix harbor.http. and suffix .conf
|
||||
include /etc/nginx/conf.d/harbor.http.*.conf;
|
||||
|
||||
location / {
|
||||
proxy_pass http://portal/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /c/ {
|
||||
proxy_pass http://core/c/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /api/ {
|
||||
proxy_pass http://core/api/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /chartrepo/ {
|
||||
proxy_pass http://core/chartrepo/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /v1/ {
|
||||
return 404;
|
||||
}
|
||||
|
||||
location /v2/ {
|
||||
proxy_pass http://core/v2/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /service/ {
|
||||
proxy_pass http://core/service/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /service/notifications {
|
||||
return 404;
|
||||
}
|
||||
}
|
||||
}
|
149
make/photon/prepare/templates/nginx/nginx.https.conf.jinja
Normal file
@ -0,0 +1,149 @@
|
||||
worker_processes auto;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
use epoll;
|
||||
multi_accept on;
|
||||
}
|
||||
|
||||
http {
|
||||
tcp_nodelay on;
|
||||
include /etc/nginx/conf.d/*.upstream.conf;
|
||||
|
||||
# this is necessary for us to be able to disable request buffering in all cases
|
||||
proxy_http_version 1.1;
|
||||
|
||||
upstream core {
|
||||
server core:8080;
|
||||
}
|
||||
|
||||
upstream portal {
|
||||
server portal:80;
|
||||
}
|
||||
|
||||
log_format timed_combined '$remote_addr - '
|
||||
'"$request" $status $body_bytes_sent '
|
||||
'"$http_referer" "$http_user_agent" '
|
||||
'$request_time $upstream_response_time $pipe';
|
||||
|
||||
access_log /dev/stdout timed_combined;
|
||||
|
||||
include /etc/nginx/conf.d/*.server.conf;
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
# server_name harbordomain.com;
|
||||
server_tokens off;
|
||||
# SSL
|
||||
ssl_certificate {{ssl_cert}};
|
||||
ssl_certificate_key {{ssl_cert_key}};
|
||||
|
||||
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
||||
ssl_protocols TLSv1.1 TLSv1.2;
|
||||
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_session_cache shared:SSL:10m;
|
||||
|
||||
# disable any limits to avoid HTTP 413 for large image uploads
|
||||
client_max_body_size 0;
|
||||
|
||||
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
|
||||
chunked_transfer_encoding on;
|
||||
|
||||
# customized location config files can be placed in /etc/nginx/conf.d with prefix harbor.https. and suffix .conf
|
||||
include /etc/nginx/conf.d/harbor.https.*.conf;
|
||||
|
||||
location / {
|
||||
proxy_pass http://portal/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Add Secure flag when serving HTTPS
|
||||
proxy_cookie_path / "/; secure";
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /c/ {
|
||||
proxy_pass http://core/c/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /api/ {
|
||||
proxy_pass http://core/api/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /chartrepo/ {
|
||||
proxy_pass http://core/chartrepo/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /v1/ {
|
||||
return 404;
|
||||
}
|
||||
|
||||
location /v2/ {
|
||||
proxy_pass http://core/v2/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /service/ {
|
||||
proxy_pass http://core/service/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /service/notifications {
|
||||
return 404;
|
||||
}
|
||||
}
|
||||
server {
|
||||
listen 80;
|
||||
#server_name harbordomain.com;
|
||||
return 308 https://$host$request_uri;
|
||||
}
|
||||
}
|
33
make/photon/prepare/templates/nginx/notary.server.conf.jinja
Normal file
@ -0,0 +1,33 @@
|
||||
server {
|
||||
listen 4443 ssl;
|
||||
server_tokens off;
|
||||
# ssl
|
||||
ssl_certificate {{ssl_cert}};
|
||||
ssl_certificate_key {{ssl_cert_key}};
|
||||
|
||||
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
||||
ssl_protocols TLSv1.1 TLSv1.2;
|
||||
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_session_cache shared:SSL:10m;
|
||||
|
||||
# disable any limits to avoid http 413 for large image uploads
|
||||
client_max_body_size 0;
|
||||
|
||||
# required to avoid http 411: see issue #1486 (https://github.com/docker/docker/issues/1486)
|
||||
chunked_transfer_encoding on;
|
||||
|
||||
location /v2/ {
|
||||
proxy_pass http://notary-server/v2/;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,3 @@
|
||||
upstream notary-server {
|
||||
server notary-server:4443;
|
||||
}
|
32
make/photon/prepare/templates/notary/notary-signer-ca.crt
Normal file
@ -0,0 +1,32 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFhjCCA26gAwIBAgIJALJdsE+BUxypMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
|
||||
BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0G
|
||||
A1UECgwGRG9ja2VyMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTAeFw0xNzAx
|
||||
MjMwNjAzMzZaFw0yNzAxMjEwNjAzMzZaMF8xCzAJBgNVBAYTAlVTMQswCQYDVQQI
|
||||
DAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0GA1UECgwGRG9ja2VyMRow
|
||||
GAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
|
||||
ADCCAgoCggIBALIZNBcIoQDJql5w+XULXq9W3tmD47xnf+IG4u7hkDVPCT4xRG74
|
||||
LBoSuFyPUrfT+tsibMlNG6XRtSfLQdNNeQuyIuiilNXV0kXB0RR3TrhxCaKdhRU5
|
||||
oQGfpYMvbPNFB7WU/5aAiQutHH85hEMPECf1qPjq8YlUaXJLGFY3WRkW+OOBZ78U
|
||||
00PqKlvC1kR/NbsV3IkMrO+vWWJQrPFusyYjQ511eQXnRtt8P0Qic0azPffQDVxC
|
||||
WUe47hmdQ1AULbxQ9AZcPlMI7UFqo+/w/4hPEGJMeOWirLvHLXg4nsOwy7DfWl/n
|
||||
MqLdJOC/KNfQVAQtkteeZZkkIIV1gxTPYsJqPNwkP9GdJK1A8NW1ef75v7xbQCPY
|
||||
03QQonBEK7ny7b1xXGGgJzXvK9RP0UUwjt/815c4d0cgUHsy4yuvl2F44EObRshk
|
||||
fjJVsN/0wrtq4QLE5ZvbeO+7to8dLcRxkmB8axhxahega7akUyY0WxZ+iSn6fzft
|
||||
/xeCcs/L10V5z0kK4PbiNnooDzV4B6Dy/5oyNExw0jgpD0mzOK5aLb0tXGqFT/ZJ
|
||||
9vydelBq5q4jLV7SHhHM1dBJSv1fl7vOpDlEr7LBd4YAO2BowoyGLHtLhgYybXF+
|
||||
CZ9ywPb1dIIcdK5IVeZECNHMSBuhCRZUu+aun8tRcdSgLEX7mQ/GKWELAgMBAAGj
|
||||
RTBDMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgFGMB0GA1UdDgQW
|
||||
BBSWWbcCebeEgZlWk2/k+abh+bEFpDANBgkqhkiG9w0BAQsFAAOCAgEAQ9gA3Q4b
|
||||
r2+ZJdIDoDzCNdtHQbb/d1NiUP/Na1MFo7omR3MnKGXy3dIp9IrQq6ROhlqUhDvl
|
||||
pZegYhTbunTVv1KKJ+5n1hY6pG/Jr8oLY3b9i4qwDLKfQGm5PmrfwAtqbLSfY2M0
|
||||
2AZyAhCdGbqB7WpTdG1J7DzGbVVWAtS05e24Mu0qZJvpHdtl4+t89vXgJ/bPrPxF
|
||||
cpAlT9DOtobTEqrXZeS937F1qNyIgyBki+7mtxkwng5cf3zQM2BJ9lSFQJOBSRDr
|
||||
haMcnaPI4pknO7OfYf5W9LaS1Dx/U/NeMBfnVBd9NjUw+TMjy2MdMLUaLa9EF7Jo
|
||||
Gjk+fKaTaUgO8I487wHPMeoEA4A4dEePzGrybRLfl1ZYGQ0xcgunz64n2xfQIy2y
|
||||
swiyaofYlLxzHzOL0N+Y76P0ic37t9R2F5ggNhfbXhClK2h4HmdjRRRt3VkxR4AD
|
||||
7OM09bEhlZby34HOlCaC0PHKwYBMjneAG3ycPN88YTMYR2/KizExe71ayNwX2KHL
|
||||
ib1nOZgZT6s+YvgsZ7lRmMD4iqjuAEh5SRAcWlolVif8bAy09BkY1vwrtgV73q88
|
||||
heEbsCE1fsfk1OfH5W4yjjiSDZFRt5oTCPQWJp+2P0RJ9LCxcbf0RrCg3hg5rD9N
|
||||
lVTA0dsixv5zF3wTuad9inhk9Rmlq1KoaqA=
|
||||
-----END CERTIFICATE-----
|
32
make/photon/prepare/templates/notary/notary-signer.crt
Normal file
@ -0,0 +1,32 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFdjCCA14CCQCeVwANSZmmiDANBgkqhkiG9w0BAQsFADCBhDELMAkGA1UEBhMC
|
||||
VVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCVBhbG8gQWx0bzEVMBMG
|
||||
A1UECgwMVk13YXJlLCBJbmMuMQ8wDQYDVQQLDAZIYXJib3IxJDAiBgNVBAMMG1Nl
|
||||
bGYtc2lnbmVkIGJ5IFZNd2FyZSwgSW5jLjAeFw0xNzAzMjQwNTMyMDBaFw0yNzAz
|
||||
MjIwNTMyMDBaMHUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIw
|
||||
EAYDVQQHDAlQYWxvIEFsdG8xFTATBgNVBAoMDFZNd2FyZSwgSW5jLjEPMA0GA1UE
|
||||
CwwGSGFyYm9yMRUwEwYDVQQDDAxub3RhcnlzaWduZXIwggIiMA0GCSqGSIb3DQEB
|
||||
AQUAA4ICDwAwggIKAoICAQC6TV2RCoH8d1g6xFvDo4FL9v+pGLe5+bu9ryjTaLbN
|
||||
dH/Cmf5/8WrmgJ3vG2Ksk796J7qsVddwvQkZn6NwDm2Tm+ETMCG85yEA3jl4Kr9R
|
||||
XfWHYWEavv0vsq6M+bUSSq7VJAhgk4wfx6qJBnFX2qKpODeYLHaHxU1EnIXrStNf
|
||||
IqR4Eu0Xre8jAkzrDdaFy/KnX4HGgNdz413CXzBCKEuu3VJj07ZvonnTzOgoLvh8
|
||||
+PCoQ2M4OBPT9gHqUov1I8nWnrjc+HuM1BW3YIGCB5TV9x0Y7hjvkr4E38gbJURj
|
||||
uDwg8jof4lMRmU/FHXFLt1ucGwNFUJdPwI7dyEKRA03Lr7htfP5sa9tmv3L93dKD
|
||||
po1gW1LsfiM3Cur5jARM/hBA+eYJr12Laf9oL59r8JmweqF3zRSwGSY336XoR/Fv
|
||||
/PAFs9vfKKWZp0uiRtuY9JZNRTF8trnfNf1957bND+DS2HWPmWkw4yK6CGa0s55X
|
||||
adiDt4gDFvKjl68dBWZoHutY+cZy/hK1D5uqagcX1kzbr/Pzy1gsq9FBBwaTJqBu
|
||||
YIAsSuzP+7NNZXoPd3rg13V93pbZr8eQN5VOQIBZK83xZEtHSJBEdUSuBOo3JS7j
|
||||
/rjEnspRqOI4soFnx1vaK0TrRyzJ5KBOuGpW4u8/ZUdIq8KIE30Mj/XI/sgAPr5j
|
||||
UQIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQBjqYBm/FRqyMH2hnHA0TMXY/WPufJ8
|
||||
TX10daELCAYJCEETXmUt1i7dnFxdAZXTnHENHdNYiS4nGBfqMLmODtcAamcv6Dcl
|
||||
JnyQPt3QlCDPKkcHgz3y4tvDDx6M5rFWYzN9QLiWAYrunIk1R4Jj7FODrM6/NODE
|
||||
0Mz1czWfsmLfX/jF80SsxnY1DCLKGgo6/RID3xTp4eIMboxCfeH2/yDA+6YPyYbV
|
||||
Si4ccwo9Foq0IYU8bimPNTyBQ0N+8ajcn328ql6aazmr894Ch5pWA3Qxaa98FcKS
|
||||
zokBvmmCuvCJ9HOmxKWdFEhSRS9GWxn7wg78UIlLP/8RfUrsecBJHgyhWRA7Qs3K
|
||||
keiG68Zrhn456IdMxjCZXgJ7gAAe77n4Cz8sFEHAvnAg9JLNEHuEBV5H1Hb7TzET
|
||||
k0lPiEY78QjutOpqHsWiagqSjlGEMqKI9c8WxXHh9030T/6NnWkdXFo+4HaEZEpp
|
||||
0JryASS53B5SwLIPrn0Y2/io/kRgbglGktPt6Ex0DwW3f96lcz3me34Nw+HOYYnz
|
||||
b0cz7JqJZgFXfEnykic3IwZs7m7Xrl9B/vvaVub9Fb5LQ7rIzrO7VkoILov/G41B
|
||||
Pd4/kagjXDTWd+UBMvZF6YGjr+TUZi5ooi7bvQ3X6N9WNYKW4a1DOokz9janStiL
|
||||
MrTKyOEOBi0Aew==
|
||||
-----END CERTIFICATE-----
|
52
make/photon/prepare/templates/notary/notary-signer.key
Normal file
@ -0,0 +1,52 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC6TV2RCoH8d1g6
|
||||
xFvDo4FL9v+pGLe5+bu9ryjTaLbNdH/Cmf5/8WrmgJ3vG2Ksk796J7qsVddwvQkZ
|
||||
n6NwDm2Tm+ETMCG85yEA3jl4Kr9RXfWHYWEavv0vsq6M+bUSSq7VJAhgk4wfx6qJ
|
||||
BnFX2qKpODeYLHaHxU1EnIXrStNfIqR4Eu0Xre8jAkzrDdaFy/KnX4HGgNdz413C
|
||||
XzBCKEuu3VJj07ZvonnTzOgoLvh8+PCoQ2M4OBPT9gHqUov1I8nWnrjc+HuM1BW3
|
||||
YIGCB5TV9x0Y7hjvkr4E38gbJURjuDwg8jof4lMRmU/FHXFLt1ucGwNFUJdPwI7d
|
||||
yEKRA03Lr7htfP5sa9tmv3L93dKDpo1gW1LsfiM3Cur5jARM/hBA+eYJr12Laf9o
|
||||
L59r8JmweqF3zRSwGSY336XoR/Fv/PAFs9vfKKWZp0uiRtuY9JZNRTF8trnfNf19
|
||||
57bND+DS2HWPmWkw4yK6CGa0s55XadiDt4gDFvKjl68dBWZoHutY+cZy/hK1D5uq
|
||||
agcX1kzbr/Pzy1gsq9FBBwaTJqBuYIAsSuzP+7NNZXoPd3rg13V93pbZr8eQN5VO
|
||||
QIBZK83xZEtHSJBEdUSuBOo3JS7j/rjEnspRqOI4soFnx1vaK0TrRyzJ5KBOuGpW
|
||||
4u8/ZUdIq8KIE30Mj/XI/sgAPr5jUQIDAQABAoICAQCqIgbFcqwcK7zWBgWrFsD3
|
||||
53u4J4t4+df6NGB7F9CAtdgKlej1XDl8gI46Em89HLwqyOdPhCD3opoR3Vg69+IX
|
||||
f62+gSD+SrA4A7jFxXvryXt0g3hTHYFHssx2j39NUghxOrOvxm6bgxJ4ifqt+Uq8
|
||||
cEtM26Xu/T4/3xTpN+7pnVBHGzmLe1q8RNiLe5qhmwtgz/ZKmdSnz0YLQDRo5jWf
|
||||
Xhxkb63WKrFIu4JzV9my/v9/GfMdHxD0a196ZqHLX0Buj4pQuVbS18dxLF94qIXC
|
||||
FCZtYtpAxmhjOR2btJ/M1S2MBMkR3vRvSOuxHd8d/zdYys5k2WElArs1TDGGDldW
|
||||
jp3FYkoygsdWTs056HM1Y9F8dV2KAWfAhEQD8mBIGVjMrCqpnyZcK6JkqVg9c7YW
|
||||
IYQ2JRwsHq58FMNa3TLTvf/OClhEfSbRWAF0AhMTpnSUgP06cbJeXyzqzHdE37hv
|
||||
74OBx7KNoS+PEQ3lVgbHsWoUzf3SqB1IOzLyzuEUgHqON2GKmmCNcRMBi3DuV9tw
|
||||
Q8LWynNxhD8vyBkmo0kAd/FwgXrxJTGdYvxyn29I7QanCTH7o8wtjSE0jj9Qo7oC
|
||||
McAYGR6oTAjrT78KhI7aZJU5nuA6ySSCJRa6et1CC+SseWknyMMJ5HTo8l7jjXJA
|
||||
9hjNGGs6giOxznizf+2YAQKCAQEA9wRQk4yN402tfuicvfQBnFUtcpqctWSgGc0T
|
||||
qzWJgH/W07FMUHzAvqCgsYMMaeteXOMZH7jijvtIlhYfIg5w+RJ9PSsSu680OzGN
|
||||
R31+l2B/QzRAHUJ6+OVgWxAn6awU1mYLaiwVmSNWEnjAPE4XeSK708OOganI3pBQ
|
||||
8zOHj+j6uV8ddG79D6FqNJHAQwpou/p+XO/BGDFgX22x4F68Z0gCQcmoyAE7ppOp
|
||||
dqq3lPoDbRQ02/5cqaIA6dhmfjK2cpz4y1nUxffzY7qJjpoB/YSdR66cCNiYcJzp
|
||||
fMVBXhF9Iyj/Cah1w+hc0NOy9dW15afFaLFK0zrtAzEaVxH/0QKCAQEAwRPOwSCl
|
||||
XrMYXmc91TF6XbhErILHK/pIEOIMF09KNJvSjY0188Ram/pFbPRYh0cIyASmRGXL
|
||||
Qq5B1Qi0vx5TCq1OCrW2yeE7zboAlnADhk1u9N8YmL6JrCKVGQO7wFD3V8uphXdM
|
||||
tixNa5WvJ6eE5Vq+SVy99V5pQgb8ErrISlW4MYK7LI7DruSDuM2tHtiOcXcdTVej
|
||||
1stXJZkH46RYvxxid9tRzfiB8K5ziZfLwPNf2wRyj1J4ojn5pPNhhfkjJ24LCZGt
|
||||
JxwSXqdP+4x7by6x3mU+hutU/lF3jl+0edSnU0cZ6lvuq2T5YGgda/VXlv1ZFQUw
|
||||
rwUXD9unU+aLgQKCAQEA9R74/pI5sthAVHFsKStb9dComtNGstI59aCF5h3oZvV1
|
||||
Lvj/q9dARWqMS9qplOoV58MMCWikmhJNw3IMTvVZsjBgyzRVEJ4aDKttcQXde0Ys
|
||||
w3m0LdTsxtSHu5XapY032FHG/gLlI+Pm48mjqbQsou6OyOOEJLNhO0qmqc/2tB4T
|
||||
v6PdTM9enAYnqCcCTQSlTfSTNJJOYT2OTuRB4U7hUvQoGTSOInrmwLRDNBjQuCso
|
||||
/zNQCQbu2P6EPYmam5yjZDTUxqZL+G/GvK49Fp9JXlQc5ycke7rD+uwa3s+3wCtG
|
||||
rH9gJitfQZrxj+Cj9EOwj0bfJLbac6ZD0CkH5GNeIQKCAQBdoGFOPapzdZ2HicDu
|
||||
NQQFlmmWzgQPS1rO9Q6v7v8o67b6dVOIVdsqb/5ii0qyrruPYtHNsR8TwrShvYsI
|
||||
cogKUWfawatV0ibR6DSIvuC2q632iIjA6QSRuGNcsfbFl32Z0WTvF57XaDxSw08g
|
||||
h5dmMM69fH+REKsyHXj3DCQ8B70+JQrm3IP/t0g4wWQF5TWNyBkpfCoy6n/j94Vf
|
||||
2j4+zmDhhjTxEGTSdYYJXtarRllhN5Ll9TQSVtK8LllIQjvNzwsDJOU2ZeJyi+e5
|
||||
L7Jbg+U01xuvCUc52/+Bxt8ZhQlu1Le4ccQW0Ows19AMnfhPe6NLEi09cdZxFi7Z
|
||||
/J4BAoIBABCzkBDFxZdfWYt69VBt9PSG8eJ6avny3hXCtKaHIQb+aD5nKjRP0DVh
|
||||
gyutCo6RasMEc6D1tJGyR/Xvhm64q4JPb5UbSaRQiVYKdgRtMM9pZeBkcBtNs18K
|
||||
yMx5ajgYorrbi86hXHX7q+JYP8MCbcqqAUSl/Hi8nPxc1foTiCNDf4kGoHvXmoxt
|
||||
0tA65tFFQhEA6KBn68SDkyTsl/zb5Sx0GJY4kZkOeF3GaxPFX12skgXv95GJUskX
|
||||
88RJsH4Qqqtzbzj8R241BH8OrcOoyELc6xPioEqUHKVxSIf2ylITbj0UQHd2u0mN
|
||||
tajKl+aoc+CDxUYbilzhhKetWWF/cJY=
|
||||
-----END PRIVATE KEY-----
|
@ -0,0 +1,28 @@
|
||||
{
|
||||
"server": {
|
||||
"http_addr": ":4443"
|
||||
},
|
||||
"trust_service": {
|
||||
"type": "remote",
|
||||
"hostname": "notarysigner",
|
||||
"port": "7899",
|
||||
"tls_ca_file": "./notary-signer-ca.crt",
|
||||
"key_algorithm": "ecdsa"
|
||||
},
|
||||
"logging": {
|
||||
"level": "debug"
|
||||
},
|
||||
"storage": {
|
||||
"backend": "postgres",
|
||||
"db_url": "postgres://server:password@postgresql:5432/notaryserver?sslmode=disable"
|
||||
},
|
||||
"auth": {
|
||||
"type": "token",
|
||||
"options": {
|
||||
"realm": "{{token_endpoint}}/service/token",
|
||||
"service": "harbor-notary",
|
||||
"issuer": "harbor-token-issuer",
|
||||
"rootcertbundle": "/etc/notary/root.crt"
|
||||
}
|
||||
}
|
||||
}
|
2
make/photon/prepare/templates/notary/server_env.jinja
Normal file
@ -0,0 +1,2 @@
|
||||
MIGRATIONS_PATH=migrations/server/postgresql
|
||||
DB_URL=postgres://server:password@postgresql:5432/notaryserver?sslmode=disable
|
@ -0,0 +1,15 @@
|
||||
{
|
||||
"server": {
|
||||
"grpc_addr": ":7899",
|
||||
"tls_cert_file": "./notary-signer.crt",
|
||||
"tls_key_file": "./notary-signer.key"
|
||||
},
|
||||
"logging": {
|
||||
"level": "debug"
|
||||
},
|
||||
"storage": {
|
||||
"backend": "postgres",
|
||||
"db_url": "postgres://signer:password@postgresql:5432/notarysigner?sslmode=disable",
|
||||
"default_alias":"defaultalias"
|
||||
}
|
||||
}
|
3
make/photon/prepare/templates/notary/signer_env.jinja
Normal file
@ -0,0 +1,3 @@
|
||||
NOTARY_SIGNER_DEFAULTALIAS={{alias}}
|
||||
MIGRATIONS_PATH=migrations/signer/postgresql
|
||||
DB_URL=postgres://signer:password@postgresql:5432/notarysigner?sslmode=disable
|
37
make/photon/prepare/templates/registry/config.yml.jinja
Normal file
@ -0,0 +1,37 @@
|
||||
version: 0.1
|
||||
log:
|
||||
level: info
|
||||
fields:
|
||||
service: registry
|
||||
storage:
|
||||
cache:
|
||||
layerinfo: redis
|
||||
{{storage_provider_info}}
|
||||
maintenance:
|
||||
uploadpurging:
|
||||
enabled: false
|
||||
delete:
|
||||
enabled: true
|
||||
redis:
|
||||
addr: {{redis_host}}:{{redis_port}}
|
||||
password: {{redis_password}}
|
||||
db: {{redis_db_index_reg}}
|
||||
http:
|
||||
addr: :5000
|
||||
secret: placeholder
|
||||
debug:
|
||||
addr: localhost:5001
|
||||
auth:
|
||||
token:
|
||||
issuer: harbor-token-issuer
|
||||
realm: {{public_url}}/service/token
|
||||
rootcertbundle: /etc/registry/root.crt
|
||||
service: harbor-registry
|
||||
notifications:
|
||||
endpoints:
|
||||
- name: harbor
|
||||
disabled: false
|
||||
url: {{core_url}}/service/notifications
|
||||
timeout: 3000ms
|
||||
threshold: 5
|
||||
backoff: 1s
|
35
make/photon/prepare/templates/registry/root.crt
Normal file
@ -0,0 +1,35 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIGBzCCA++gAwIBAgIJAKB8CNqCxhr7MA0GCSqGSIb3DQEBCwUAMIGZMQswCQYD
|
||||
VQQGEwJDTjEOMAwGA1UECAwFU3RhdGUxCzAJBgNVBAcMAkNOMRUwEwYDVQQKDAxv
|
||||
cmdhbml6YXRpb24xHDAaBgNVBAsME29yZ2FuaXphdGlvbmFsIHVuaXQxFDASBgNV
|
||||
BAMMC2V4YW1wbGUuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu
|
||||
Y29tMB4XDTE2MDUxNjAyNDY1NVoXDTI2MDUxNDAyNDY1NVowgZkxCzAJBgNVBAYT
|
||||
AkNOMQ4wDAYDVQQIDAVTdGF0ZTELMAkGA1UEBwwCQ04xFTATBgNVBAoMDG9yZ2Fu
|
||||
aXphdGlvbjEcMBoGA1UECwwTb3JnYW5pemF0aW9uYWwgdW5pdDEUMBIGA1UEAwwL
|
||||
ZXhhbXBsZS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20w
|
||||
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2ky/K/XneJKbCbpOsWlQ7
|
||||
OwgYEQNsa044RkwSbTwPwgLafUZ3r9c5nkXE8APqAikTQQBwyiNjk7QeXgIOjJXd
|
||||
7+IpwGoU6Bi2miA21qfvJPknyDAqw9tT/ycGQrvkY6rnqd++ri30ZUByUgO0du6+
|
||||
aWHo7af5/G1HQz0tu6i1tIF1dhSHNeqJKwxyUG8vIiT/PfbtU/mXSdQ07M+4ojBC
|
||||
O7FgoOS+rWgbL3yhWUTrCXSV2HZlhksYBhtWGoFVRPVSf89iqL02h9rZEjmfVY6R
|
||||
QlCnzu9v49Q8WFU528f+gDNXr9v13PKEDmloMzTqWPaCyD2FBbEKBsWHXHf1zqlI
|
||||
jyGZV7rHZ3i0C1LI6bdDDP7M7aVs8O+RjxK+HmfFRg5us2t6g7zAevwwLpMZRAud
|
||||
S39F91Up7l9g8WXpViok/8vcsOdePvvWcWro8qJhuEHAnDdMzj2Cko1L85/vRM/a
|
||||
budWXK7Ix0TlPWPfHJc2SLFeqqcm5Iypf/cGabQ6f0oRt6bCfspFgX9upznT5FwZ
|
||||
R0o1w6Q3q+4xVl6LgZvEAudWppyz79RACJA/jbXZQ7uJkXAxoI0nev9vgY6XJqUj
|
||||
XIQDih2hmi/uTnNU7Me7w7pCYKPdHlNU652kaJSH6W6ZFGk2rEOCOeAuWO9pZTq2
|
||||
3IhuOcDAKOcmimlkzaWRGQIDAQABo1AwTjAdBgNVHQ4EFgQUPJF++WMsv1OJvf7F
|
||||
oCew37JTnfQwHwYDVR0jBBgwFoAUPJF++WMsv1OJvf7FoCew37JTnfQwDAYDVR0T
|
||||
BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAb5LvqukMxWd5Zajbh3orfYsXmhWn
|
||||
UWiwG176+bd3b5xMlG9iLd4vQ11lTZoIhFOfprRQzbizQ8BzR2JBQckpLcy+5hyA
|
||||
D3M9vLL37OwA0wT6kxFnd6LtlFaH5gG++huw2ts2PDXFz0jqw+0YE/R8ov2+YdaZ
|
||||
aPSEMunmAuEY1TbYWzz4u6PxycxhQzDQ34ZmJZ34Elvw1NYMfPMGTKp34PsxIcgT
|
||||
ao5jqb9RMU6JAumfXrOvXRjjl573vX2hgMZzEU6OF2/+uyg95chn6nO1GUQrT2+F
|
||||
/1xIqfHfFCm8+jujSDgqfBtGI+2C7No+Dq8LEyEINZe6wSQ81+ryt5jy5SZmAsnj
|
||||
V4OsSIwlpR5fLUwrFStVoUWHEKl1DflkYki/cAC1TL0Om+ldJ219kcOnaXDNaq66
|
||||
3I75BvRY7/88MYLl4Fgt7sn05Mn3uNPrCrci8d0R1tlXIcwMdCowIHeZdWHX43f7
|
||||
NsVk/7VSOxJ343csgaQc+3WxEFK0tBxGO6GP+Xj0XmdVGLhalVBsEhPjnmx+Yyrn
|
||||
oMsTA1Yrs88C8ItQn7zuO/30eKNGTnby0gptHiS6sa/c3O083Mpi8y33GPVZDvBl
|
||||
l9PfSZT8LG7SvpjsdgdNZlyFvTY4vsB+Vd5Howh7gXYPVXdCs4k7HMyo7zvzliZS
|
||||
ekCw9NGLoNqQqnA=
|
||||
-----END CERTIFICATE-----
|
@ -0,0 +1,8 @@
|
||||
---
|
||||
protocol: "http"
|
||||
port: 8080
|
||||
log_level: "INFO"
|
||||
|
||||
#https_config:
|
||||
# cert: "server.crt"
|
||||
# key: "server.key"
|
3
make/photon/prepare/templates/registryctl/env.jinja
Normal file
@ -0,0 +1,3 @@
|
||||
CORE_SECRET={{core_secret}}
|
||||
JOBSERVICE_SECRET={{jobservice_secret}}
|
||||
|
0
make/photon/prepare/utils/__init__.py
Normal file
30
make/photon/prepare/utils/admin_server.py
Normal file
@ -0,0 +1,30 @@
|
||||
import os
|
||||
|
||||
from g import config_dir, templates_dir
|
||||
from utils.misc import prepare_config_dir, generate_random_string
|
||||
from utils.jinja import render_jinja
|
||||
|
||||
adminserver_config_dir = os.path.join(config_dir, 'adminserver')
|
||||
adminserver_env_template = os.path.join(templates_dir, "adminserver", "env.jinja")
|
||||
adminserver_conf_env = os.path.join(config_dir, "adminserver", "env")
|
||||
|
||||
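# Create the adminserver config directory and render its env file from the template.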
def prepare_adminserver(config_dict, with_notary, with_clair, with_chartmuseum):
|
||||
prepare_adminserver_config_dir()
|
||||
render_adminserver(config_dict, with_notary, with_clair, with_chartmuseum)
|
||||
|
||||
def prepare_adminserver_config_dir():
|
||||
prepare_config_dir(adminserver_config_dir)
|
||||
|
||||
def render_adminserver(config_dict, with_notary, with_clair, with_chartmuseum):
|
||||
# Use reload_key to avoid reloading config after restarting Harbor
|
||||
reload_key = generate_random_string(6) if config_dict['reload_config'] == "true" else ""
|
||||
|
||||
render_jinja(
|
||||
adminserver_env_template,
|
||||
adminserver_conf_env,
|
||||
with_notary=with_notary,
|
||||
with_clair=with_clair,
|
||||
with_chartmuseum=with_chartmuseum,
|
||||
reload_key=reload_key,
|
||||
**config_dict
|
||||
)
|
101
make/photon/prepare/utils/cert.py
Normal file
@ -0,0 +1,101 @@
|
||||
# Get or generate private key
|
||||
import os, sys, subprocess, shutil
|
||||
from subprocess import DEVNULL
|
||||
from functools import wraps
|
||||
|
||||
from .misc import mark_file
|
||||
from .misc import generate_random_string
|
||||
|
||||
SSL_CERT_PATH = os.path.join("/etc/nginx/cert", "server.crt")
|
||||
SSL_CERT_KEY_PATH = os.path.join("/etc/nginx/cert", "server.key")
|
||||
|
||||
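# Read an existing secret from <folder>/<filename> if present; otherwise
# generate a random string of the requested length and persist it so later
# runs reuse the same value.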
def _get_secret(folder, filename, length=16):
|
||||
key_file = os.path.join(folder, filename)
|
||||
if os.path.isfile(key_file):
|
||||
with open(key_file, 'r') as f:
|
||||
key = f.read()
|
||||
print("loaded secret from file: %s" % key_file)
|
||||
mark_file(key_file)
|
||||
return key
|
||||
if not os.path.isdir(folder):
|
||||
os.makedirs(folder)
|
||||
key = generate_random_string(length)
|
||||
with open(key_file, 'w') as f:
|
||||
f.write(key)
|
||||
print("Generated and saved secret to file: %s" % key_file)
|
||||
mark_file(key_file)
|
||||
return key
|
||||
|
||||
|
||||
def get_secret_key(path):
|
||||
secret_key = _get_secret(path, "secretkey")
|
||||
if len(secret_key) != 16:
|
||||
raise Exception("secret key's length has to be 16 chars, current length: %d" % len(secret_key))
|
||||
return secret_key
|
||||
|
||||
|
||||
def get_alias(path):
|
||||
alias = _get_secret(path, "defaultalias", length=8)
|
||||
return alias
|
||||
|
||||
## decorator actions
|
||||
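# Wrap a function that returns an openssl exit status: print a success message
# when it returns 0, otherwise report the failure and exit.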
def stat_decorator(func):
|
||||
@wraps(func)
|
||||
def check_wrapper(*args, **kw):
|
||||
stat = func(*args, **kw)
|
||||
if stat == 0:
|
||||
print("Generated certificate, key file: {key_path}, cert file: {cert_path}".format(**kw))
|
||||
else:
|
||||
print("Fail to generate key file: {key_path}, cert file: {cert_path}".format(**kw))
|
||||
sys.exit(1)
|
||||
return check_wrapper
|
||||
|
||||
|
||||
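# Generate a self-signed root certificate: a 4096-bit RSA key plus an X.509
# cert valid for 3650 days, written to key_path and cert_path.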
@stat_decorator
|
||||
def create_root_cert(subj, key_path="./k.key", cert_path="./cert.crt"):
|
||||
rc = subprocess.call(["openssl", "genrsa", "-out", key_path, "4096"], stdout=DEVNULL, stderr=subprocess.STDOUT)
|
||||
if rc != 0:
|
||||
return rc
|
||||
return subprocess.call(["openssl", "req", "-new", "-x509", "-key", key_path,\
|
||||
"-out", cert_path, "-days", "3650", "-subj", subj], stdout=DEVNULL, stderr=subprocess.STDOUT)
|
||||
|
||||
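# Generate a key and CSR, then sign the CSR with the given CA key/cert to
# produce a certificate valid for 3650 days.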
@stat_decorator
|
||||
def create_cert(subj, ca_key, ca_cert, key_path="./k.key", cert_path="./cert.crt"):
|
||||
cert_dir = os.path.dirname(cert_path)
|
||||
csr_path = os.path.join(cert_dir, "tmp.csr")
|
||||
rc = subprocess.call(["openssl", "req", "-newkey", "rsa:4096", "-nodes","-sha256","-keyout", key_path,\
|
||||
"-out", csr_path, "-subj", subj], stdout=DEVNULL, stderr=subprocess.STDOUT)
|
||||
if rc != 0:
|
||||
return rc
|
||||
return subprocess.call(["openssl", "x509", "-req", "-days", "3650", "-in", csr_path, "-CA", \
|
||||
ca_cert, "-CAkey", ca_key, "-CAcreateserial", "-out", cert_path], stdout=DEVNULL, stderr=subprocess.STDOUT)
|
||||
|
||||
|
||||
def openssl_installed():
|
||||
shell_stat = subprocess.call(["which", "openssl"], stdout=DEVNULL, stderr=subprocess.STDOUT)
|
||||
if shell_stat != 0:
|
||||
print("Cannot find openssl installed in this computer\nUse default SSL certificate file")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
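# When customize_crt is enabled and openssl is available, generate the private key
# and root certificate; otherwise copy the bundled template files. A custom CA
# bundle for the registry is copied over when one is configured.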
def prepare_ca(
|
||||
customize_crt,
|
||||
private_key_pem_path, private_key_pem_template,
|
||||
root_crt_path, root_cert_template_path,
|
||||
registry_custom_ca_bundle_path, registry_custom_ca_bundle_config):
|
||||
|
||||
if (customize_crt == 'on' or customize_crt == True) and openssl_installed():
|
||||
empty_subj = "/"
|
||||
create_root_cert(empty_subj, key_path=private_key_pem_path, cert_path=root_crt_path)
|
||||
mark_file(private_key_pem_path)
|
||||
mark_file(root_crt_path)
|
||||
else:
|
||||
print("Copied configuration file: %s" % private_key_pem_path)
|
||||
shutil.copyfile(private_key_pem_template, private_key_pem_path)
|
||||
print("Copied configuration file: %s" % root_crt_path)
|
||||
shutil.copyfile(root_cert_template_path, root_crt_path)
|
||||
|
||||
if len(registry_custom_ca_bundle_path) > 0 and os.path.isfile(registry_custom_ca_bundle_path):
|
||||
shutil.copyfile(registry_custom_ca_bundle_path, registry_custom_ca_bundle_config)
|
||||
print("Copied custom ca bundle: %s" % registry_custom_ca_bundle_config)
|
121
make/photon/prepare/utils/chart.py
Normal file
@ -0,0 +1,121 @@
|
||||
import os, shutil
|
||||
|
||||
from g import templates_dir, config_dir
|
||||
from .jinja import render_jinja
|
||||
|
||||
chartm_temp_dir = os.path.join(templates_dir, "chartserver")
|
||||
chartm_env_temp = os.path.join(chartm_temp_dir, "env.jinja")
|
||||
|
||||
chartm_config_dir = os.path.join(config_dir, "chartserver")
|
||||
chartm_env = os.path.join(config_dir, "chartserver", "env")
|
||||
|
||||
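# Render the chartmuseum env file: redis cache settings, the core secret, and
# storage backend options derived from the registry storage provider settings.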
def prepare_chartmuseum(config_dict):
|
||||
|
||||
core_secret = config_dict['core_secret']
|
||||
registry_custom_ca_bundle_path = config_dict['registry_custom_ca_bundle_path']
|
||||
redis_host = config_dict['redis_host']
|
||||
redis_port = config_dict['redis_port']
|
||||
redis_password = config_dict['redis_password']
|
||||
redis_db_index_chart = config_dict['redis_db_index_chart']
|
||||
storage_provider_config = config_dict['storage_provider_config']
|
||||
storage_provider_name = config_dict['storage_provider_name']
|
||||
|
||||
if not os.path.isdir(chartm_config_dir):
|
||||
print ("Create config folder: %s" % chartm_config_dir)
|
||||
os.makedirs(chartm_config_dir)
|
||||
|
||||
# handle custom ca bundle
|
||||
if len(registry_custom_ca_bundle_path) > 0 and os.path.isfile(registry_custom_ca_bundle_path):
|
||||
shutil.copyfile(registry_custom_ca_bundle_path, os.path.join(chartm_config_dir, "custom-ca-bundle.crt"))
|
||||
print("Copied custom ca bundle: %s" % os.path.join(chartm_config_dir, "custom-ca-bundle.crt"))
|
||||
|
||||
# process redis info
|
||||
cache_store = "redis"
|
||||
cache_redis_password = redis_password
|
||||
cache_redis_addr = "{}:{}".format(redis_host, redis_port)
|
||||
cache_redis_db_index = redis_db_index_chart
|
||||
|
||||
|
||||
# process storage info
|
||||
# default to the local file system
|
||||
storage_driver = "local"
|
||||
# storage provider configurations
|
||||
# please be aware that we do not validate the values of the specified keys
|
||||
# convert the configs to config map
|
||||
storage_provider_configs = storage_provider_config.split(",")
|
||||
storgae_provider_confg_map = {}
|
||||
storage_provider_config_options = []
|
||||
|
||||
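# Split the comma-separated "key: value" pairs of storage_provider_config into a map.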
for k_v in storage_provider_configs:
|
||||
if len(k_v) > 0:
|
||||
kvs = k_v.split(": ") # split on ": " (colon + space) so ":" characters inside the value are not split on
|
||||
if len(kvs) == 2:
|
||||
#key must not be empty
|
||||
if kvs[0].strip() != "":
|
||||
storgae_provider_confg_map[kvs[0].strip()] = kvs[1].strip()
|
||||
|
||||
if storage_provider_name == "s3":
|
||||
# aws s3 storage
|
||||
storage_driver = "amazon"
|
||||
storage_provider_config_options.append("STORAGE_AMAZON_BUCKET=%s" % storgae_provider_confg_map.get("bucket", ""))
|
||||
storage_provider_config_options.append("STORAGE_AMAZON_PREFIX=%s" % storgae_provider_confg_map.get("rootdirectory", ""))
|
||||
storage_provider_config_options.append("STORAGE_AMAZON_REGION=%s" % storgae_provider_confg_map.get("region", ""))
|
||||
storage_provider_config_options.append("STORAGE_AMAZON_ENDPOINT=%s" % storgae_provider_confg_map.get("regionendpoint", ""))
|
||||
storage_provider_config_options.append("AWS_ACCESS_KEY_ID=%s" % storgae_provider_confg_map.get("accesskey", ""))
|
||||
storage_provider_config_options.append("AWS_SECRET_ACCESS_KEY=%s" % storgae_provider_confg_map.get("secretkey", ""))
|
||||
elif storage_provider_name == "gcs":
|
||||
# google cloud storage
|
||||
storage_driver = "google"
|
||||
storage_provider_config_options.append("STORAGE_GOOGLE_BUCKET=%s" % storgae_provider_confg_map.get("bucket", ""))
|
||||
storage_provider_config_options.append("STORAGE_GOOGLE_PREFIX=%s" % storgae_provider_confg_map.get("rootdirectory", ""))
|
||||
|
||||
keyFileOnHost = storgae_provider_confg_map.get("keyfile", "")
|
||||
if os.path.isfile(keyFileOnHost):
|
||||
shutil.copyfile(keyFileOnHost, os.path.join(chartm_config_dir, "gcs.key"))
|
||||
targetKeyFile = "/etc/chartserver/gcs.key"
|
||||
storage_provider_config_options.append("GOOGLE_APPLICATION_CREDENTIALS=%s" % targetKeyFile)
|
||||
elif storage_provider_name == "azure":
|
||||
# azure storage
|
||||
storage_driver = "microsoft"
|
||||
storage_provider_config_options.append("STORAGE_MICROSOFT_CONTAINER=%s" % storgae_provider_confg_map.get("container", ""))
|
||||
storage_provider_config_options.append("AZURE_STORAGE_ACCOUNT=%s" % storgae_provider_confg_map.get("accountname", ""))
|
||||
storage_provider_config_options.append("AZURE_STORAGE_ACCESS_KEY=%s" % storgae_provider_confg_map.get("accountkey", ""))
|
||||
storage_provider_config_options.append("STORAGE_MICROSOFT_PREFIX=/azure/harbor/charts")
|
||||
elif storage_provider_name == "swift":
|
||||
# open stack swift
|
||||
storage_driver = "openstack"
|
||||
storage_provider_config_options.append("STORAGE_OPENSTACK_CONTAINER=%s" % storgae_provider_confg_map.get("container", ""))
|
||||
storage_provider_config_options.append("STORAGE_OPENSTACK_PREFIX=%s" % storgae_provider_confg_map.get("rootdirectory", ""))
|
||||
storage_provider_config_options.append("STORAGE_OPENSTACK_REGION=%s" % storgae_provider_confg_map.get("region", ""))
|
||||
storage_provider_config_options.append("OS_AUTH_URL=%s" % storgae_provider_confg_map.get("authurl", ""))
|
||||
storage_provider_config_options.append("OS_USERNAME=%s" % storgae_provider_confg_map.get("username", ""))
|
||||
storage_provider_config_options.append("OS_PASSWORD=%s" % storgae_provider_confg_map.get("password", ""))
|
||||
storage_provider_config_options.append("OS_PROJECT_ID=%s" % storgae_provider_confg_map.get("tenantid", ""))
|
||||
storage_provider_config_options.append("OS_PROJECT_NAME=%s" % storgae_provider_confg_map.get("tenant", ""))
|
||||
storage_provider_config_options.append("OS_DOMAIN_ID=%s" % storgae_provider_confg_map.get("domainid", ""))
|
||||
storage_provider_config_options.append("OS_DOMAIN_NAME=%s" % storgae_provider_confg_map.get("domain", ""))
|
||||
elif storage_provider_name == "oss":
|
||||
# aliyun OSS
|
||||
storage_driver = "alibaba"
|
||||
storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % storgae_provider_confg_map.get("bucket", ""))
|
||||
storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % storgae_provider_confg_map.get("rootdirectory", ""))
|
||||
storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % storgae_provider_confg_map.get("endpoint", ""))
|
||||
storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % storgae_provider_confg_map.get("accesskeyid", ""))
|
||||
storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % storgae_provider_confg_map.get("accesskeysecret", ""))
|
||||
else:
|
||||
# use local file system
|
||||
storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")
|
||||
|
||||
# generate storage provider configuration
|
||||
all_storage_provider_configs = ('\n').join(storage_provider_config_options)
|
||||
|
||||
render_jinja(
|
||||
chartm_env_temp,
|
||||
chartm_env,
|
||||
cache_store=cache_store,
|
||||
cache_redis_addr=cache_redis_addr,
|
||||
cache_redis_password=cache_redis_password,
|
||||
cache_redis_db_index=cache_redis_db_index,
|
||||
core_secret=core_secret,
|
||||
storage_driver=storage_driver,
|
||||
all_storage_driver_configs=all_storage_provider_configs)
|
48
make/photon/prepare/utils/clair.py
Normal file
@ -0,0 +1,48 @@
|
||||
import os, shutil
|
||||
|
||||
from g import templates_dir, config_dir, DEFAULT_UID, DEFAULT_GID
|
||||
from .jinja import render_jinja
|
||||
from .misc import prepare_config_dir
|
||||
|
||||
clair_template_dir = os.path.join(templates_dir, "clair")
|
||||
|
||||
def prepare_clair(config_dict):
|
||||
clair_config_dir = prepare_config_dir(config_dir, "clair")
|
||||
|
||||
if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
|
||||
print("Copying offline data file for clair DB")
|
||||
shutil.rmtree(os.path.join(clair_config_dir, "postgresql-init.d"))
|
||||
|
||||
shutil.copytree(os.path.join(clair_template_dir, "postgresql-init.d"), os.path.join(clair_config_dir, "postgresql-init.d"))
|
||||
|
||||
postgres_env_path = os.path.join(clair_config_dir, "postgres_env")
|
||||
postgres_env_template = os.path.join(clair_template_dir, "postgres_env.jinja")
|
||||
|
||||
clair_config_path = os.path.join(clair_config_dir, "config.yaml")
|
||||
clair_config_template = os.path.join(clair_template_dir, "config.yaml.jinja")
|
||||
|
||||
clair_env_path = os.path.join(clair_config_dir, "clair_env")
|
||||
clair_env_template = os.path.join(clair_template_dir, "clair_env.jinja")
|
||||
|
||||
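# Render the postgres env file with the Clair DB password.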
render_jinja(
|
||||
postgres_env_template,
|
||||
postgres_env_path,
|
||||
password=config_dict['clair_db_password'])
|
||||
|
||||
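# Render Clair's config.yaml with the DB connection settings and updater interval.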
render_jinja(
|
||||
clair_config_template,
|
||||
clair_config_path,
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID,
|
||||
password= config_dict['clair_db_password'],
|
||||
username= config_dict['clair_db_username'],
|
||||
host= config_dict['clair_db_host'],
|
||||
port= config_dict['clair_db_port'],
|
||||
dbname= config_dict['clair_db'],
|
||||
interval= config_dict['clair_updaters_interval'])
|
||||
|
||||
# config http proxy for Clair
|
||||
render_jinja(
|
||||
clair_env_template,
|
||||
clair_env_path,
|
||||
**config_dict)
|
354
make/photon/prepare/utils/configs.py
Normal file
@ -0,0 +1,354 @@
|
||||
import yaml, configparser
|
||||
from .misc import generate_random_string
|
||||
|
||||
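# Sanity-check the parsed configuration: protocol and certificate settings,
# project creation policy, storage driver, and Redis connection values.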
def validate(conf, **kwargs):
|
||||
protocol = conf.get("protocol")
|
||||
if protocol != "https" and kwargs.get('notary_mode'):
|
||||
raise Exception(
|
||||
"Error: the protocol must be https when Harbor is deployed with Notary")
|
||||
if protocol == "https":
|
||||
if not conf.get("cert_path"): ## ssl_path in config
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert is not set")
|
||||
if not conf.get("cert_key_path"):
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
|
||||
|
||||
# Project validate
|
||||
project_creation = conf.get("project_creation_restriction")
|
||||
if project_creation != "everyone" and project_creation != "adminonly":
|
||||
raise Exception(
|
||||
"Error invalid value for project_creation_restriction: %s" % project_creation)
|
||||
|
||||
# Storage validate
|
||||
valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
|
||||
storage_provider_name = conf.get("storage_provider_name")
|
||||
if storage_provider_name not in valid_storage_drivers:
|
||||
raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (
|
||||
storage_provider_name, ",".join(valid_storage_drivers)))
|
||||
|
||||
storage_provider_config = conf.get("storage_provider_config") ## original is registry_storage_provider_config
|
||||
if storage_provider_name != "filesystem":
|
||||
if storage_provider_config == "":
|
||||
raise Exception(
|
||||
"Error: no provider configurations are provided for provider %s" % storage_provider_name)
|
||||
|
||||
# Redis validate
|
||||
redis_host = conf.get("redis_host")
|
||||
if redis_host is None or len(redis_host) < 1:
|
||||
raise Exception(
|
||||
"Error: redis_host in harbor.cfg needs to point to an endpoint of Redis server or cluster.")
|
||||
|
||||
redis_port = conf.get("redis_port")
|
||||
if redis_port is None or (redis_port < 1 or redis_port > 65535):
|
||||
raise Exception(
|
||||
"Error: redis_port in harbor.cfg needs to point to the port of Redis server or cluster.")
|
||||
|
||||
redis_db_index = conf.get("redis_db_index")
|
||||
if len(redis_db_index.split(",")) != 3:
|
||||
raise Exception(
|
||||
"Error invalid value for redis_db_index: %s. please set it as 1,2,3" % redis_db_index)
|
||||
|
||||
|
||||
def parse_configs(config_file_path):
|
||||
'''
|
||||
:param config_file_path: path to the harbor config file
|
||||
:returns: dict of configs
|
||||
'''
|
||||
with open(config_file_path, 'r') as f:
|
||||
formated_config = u'[configuration]\n' + f.read()
|
||||
|
||||
configs = configparser.ConfigParser()
|
||||
configs.read_string(formated_config)
|
||||
|
||||
config_dict = {}
|
||||
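# Internal endpoints used to wire the Harbor components together.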
config_dict['adminserver_url'] = "http://adminserver:8080"
|
||||
config_dict['registry_url'] = "http://registry:5000"
|
||||
config_dict['registry_controller_url'] = "http://registryctl:8080"
|
||||
config_dict['core_url'] = "http://core:8080"
|
||||
config_dict['token_service_url'] = "http://core:8080/service/token"
|
||||
|
||||
config_dict['jobservice_url'] = "http://jobservice:8080"
|
||||
config_dict['clair_url'] = "http://clair:6060"
|
||||
config_dict['notary_url'] = "http://notary-server:4443"
|
||||
config_dict['chart_repository_url'] = "http://chartmuseum:9999"
|
||||
|
||||
if configs.has_option("configuration", "reload_config"):
|
||||
config_dict['reload_config'] = configs.get("configuration", "reload_config")
|
||||
else:
|
||||
config_dict['reload_config'] = "false"
|
||||
config_dict['hostname'] = configs.get("configuration", "hostname")
|
||||
config_dict['protocol'] = configs.get("configuration", "ui_url_protocol")
|
||||
config_dict['public_url'] = config_dict['protocol'] + "://" + config_dict['hostname']
|
||||
|
||||
# Data path volume
|
||||
config_dict['data_volume'] = configs.get("configuration", "data_volume")
|
||||
|
||||
# Email related configs
|
||||
config_dict['email_identity'] = configs.get("configuration", "email_identity")
|
||||
config_dict['email_host'] = configs.get("configuration", "email_server")
|
||||
config_dict['email_port'] = configs.get("configuration", "email_server_port")
|
||||
config_dict['email_usr'] = configs.get("configuration", "email_username")
|
||||
config_dict['email_pwd'] = configs.get("configuration", "email_password")
|
||||
config_dict['email_from'] = configs.get("configuration", "email_from")
|
||||
config_dict['email_ssl'] = configs.get("configuration", "email_ssl")
|
||||
config_dict['email_insecure'] = configs.get("configuration", "email_insecure")
|
||||
config_dict['harbor_admin_password'] = configs.get("configuration", "harbor_admin_password")
|
||||
config_dict['auth_mode'] = configs.get("configuration", "auth_mode")
|
||||
config_dict['ldap_url'] = configs.get("configuration", "ldap_url")
|
||||
|
||||
# LDAP related configs
|
||||
# these two options are either both set or unset
|
||||
if configs.has_option("configuration", "ldap_searchdn"):
|
||||
config_dict['ldap_searchdn'] = configs.get("configuration", "ldap_searchdn")
|
||||
config_dict['ldap_search_pwd'] = configs.get("configuration", "ldap_search_pwd")
|
||||
else:
|
||||
config_dict['ldap_searchdn'] = ""
|
||||
config_dict['ldap_search_pwd'] = ""
|
||||
config_dict['ldap_basedn'] = configs.get("configuration", "ldap_basedn")
|
||||
# ldap_filter is null by default
|
||||
if configs.has_option("configuration", "ldap_filter"):
|
||||
config_dict['ldap_filter'] = configs.get("configuration", "ldap_filter")
|
||||
else:
|
||||
config_dict['ldap_filter'] = ""
|
||||
config_dict['ldap_uid'] = configs.get("configuration", "ldap_uid")
|
||||
config_dict['ldap_scope'] = configs.get("configuration", "ldap_scope")
|
||||
config_dict['ldap_timeout'] = configs.get("configuration", "ldap_timeout")
|
||||
config_dict['ldap_verify_cert'] = configs.get("configuration", "ldap_verify_cert")
|
||||
config_dict['ldap_group_basedn'] = configs.get("configuration", "ldap_group_basedn")
|
||||
config_dict['ldap_group_filter'] = configs.get("configuration", "ldap_group_filter")
|
||||
config_dict['ldap_group_gid'] = configs.get("configuration", "ldap_group_gid")
|
||||
config_dict['ldap_group_scope'] = configs.get("configuration", "ldap_group_scope")
|
||||
|
||||
# DB configs
|
||||
config_dict['db_password'] = configs.get("configuration", "db_password")
|
||||
config_dict['db_host'] = configs.get("configuration", "db_host")
|
||||
config_dict['db_user'] = configs.get("configuration", "db_user")
|
||||
config_dict['db_port'] = configs.get("configuration", "db_port")
|
||||
|
||||
config_dict['self_registration'] = configs.get("configuration", "self_registration")
|
||||
config_dict['project_creation_restriction'] = configs.get("configuration", "project_creation_restriction")
|
||||
|
||||
# secure configs
|
||||
if config_dict['protocol'] == "https":
|
||||
config_dict['cert_path'] = configs.get("configuration", "ssl_cert")
|
||||
config_dict['cert_key_path'] = configs.get("configuration", "ssl_cert_key")
|
||||
config_dict['customize_crt'] = configs.get("configuration", "customize_crt")
|
||||
config_dict['max_job_workers'] = configs.get("configuration", "max_job_workers")
|
||||
    config_dict['token_expiration'] = configs.get("configuration", "token_expiration")
    config_dict['secretkey_path'] = configs.get("configuration", "secretkey_path")

    # Admiral configs
    if configs.has_option("configuration", "admiral_url"):
        config_dict['admiral_url'] = configs.get("configuration", "admiral_url")
    else:
        config_dict['admiral_url'] = ""

    # Clair configs
    config_dict['clair_db_password'] = configs.get("configuration", "clair_db_password")
    config_dict['clair_db_host'] = configs.get("configuration", "clair_db_host")
    config_dict['clair_db_port'] = configs.get("configuration", "clair_db_port")
    config_dict['clair_db_username'] = configs.get("configuration", "clair_db_username")
    config_dict['clair_db'] = configs.get("configuration", "clair_db")
    config_dict['clair_updaters_interval'] = configs.get("configuration", "clair_updaters_interval")

    # UAA configs
    config_dict['uaa_endpoint'] = configs.get("configuration", "uaa_endpoint")
    config_dict['uaa_clientid'] = configs.get("configuration", "uaa_clientid")
    config_dict['uaa_clientsecret'] = configs.get("configuration", "uaa_clientsecret")
    config_dict['uaa_verify_cert'] = configs.get("configuration", "uaa_verify_cert")
    config_dict['uaa_ca_cert'] = configs.get("configuration", "uaa_ca_cert")

    # Log configs
    config_dict['log_rotate_count'] = configs.get("configuration", "log_rotate_count")
    config_dict['log_rotate_size'] = configs.get("configuration", "log_rotate_size")

    # Redis configs
    config_dict['redis_host'] = configs.get("configuration", "redis_host")
    config_dict['redis_port'] = int(configs.get("configuration", "redis_port"))
    config_dict['redis_password'] = configs.get("configuration", "redis_password")
    config_dict['redis_db_index'] = configs.get("configuration", "redis_db_index")

    db_indexes = config_dict['redis_db_index'].split(',')
    config_dict['redis_db_index_reg'] = db_indexes[0]
    config_dict['redis_db_index_js'] = db_indexes[1]
    config_dict['redis_db_index_chart'] = db_indexes[2]

    # redis://[arbitrary_username:password@]ipaddress:port/database_index
    if len(config_dict['redis_password']) > 0:
        config_dict['redis_url_js'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
        config_dict['redis_url_reg'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])
    else:
        config_dict['redis_url_js'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
        config_dict['redis_url_reg'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])

    if configs.has_option("configuration", "skip_reload_env_pattern"):
        config_dict['skip_reload_env_pattern'] = configs.get("configuration", "skip_reload_env_pattern")
    else:
        config_dict['skip_reload_env_pattern'] = "$^"

    # Registry storage configs
    config_dict['storage_provider_name'] = configs.get("configuration", "registry_storage_provider_name").strip()
    config_dict['storage_provider_config'] = configs.get("configuration", "registry_storage_provider_config").strip()

    # yaml requires 1 or more spaces between the key and value
    config_dict['storage_provider_config'] = config_dict['storage_provider_config'].replace(":", ": ", 1)
    config_dict['registry_custom_ca_bundle_path'] = configs.get("configuration", "registry_custom_ca_bundle").strip()
    config_dict['core_secret'] = generate_random_string(16)
    config_dict['jobservice_secret'] = generate_random_string(16)

    # Admin dn
    config_dict['ldap_group_admin_dn'] = configs.get("configuration", "ldap_group_admin_dn") if configs.has_option("configuration", "ldap_group_admin_dn") else ""

    return config_dict

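For reference, a password-protected Redis setup produces URLs of the form shown below; the values are illustrative only, not defaults.

# Quick sanity check of the URL format built above (hypothetical values):
assert "redis://anonymous:%s@%s:%s/%s" % ("secret", "redis", 6379, "2") == "redis://anonymous:secret@redis:6379/2"
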
def parse_yaml_config(config_file_path):
    '''
    :param config_file_path: path to the harbor.yml configuration file
    :returns: dict of configs
    '''

    with open(config_file_path) as f:
        configs = yaml.load(f)

    config_dict = {}
    config_dict['adminserver_url'] = "http://adminserver:8080"
    config_dict['registry_url'] = "http://registry:5000"
    config_dict['registry_controller_url'] = "http://registryctl:8080"
    config_dict['core_url'] = "http://core:8080"
    config_dict['token_service_url'] = "http://core:8080/service/token"

    config_dict['jobservice_url'] = "http://jobservice:8080"
    config_dict['clair_url'] = "http://clair:6060"
    config_dict['notary_url'] = "http://notary-server:4443"
    config_dict['chart_repository_url'] = "http://chartmuseum:9999"

    if configs.get("reload_config"):
        config_dict['reload_config'] = configs.get("reload_config")
    else:
        config_dict['reload_config'] = "false"

    config_dict['hostname'] = configs.get("hostname")
    config_dict['protocol'] = configs.get("ui_url_protocol")
    config_dict['public_url'] = config_dict['protocol'] + "://" + config_dict['hostname']

    # Data path volume
    config_dict['data_volume'] = configs.get("data_volume")

    # Email related configs
    config_dict['email_identity'] = configs.get("email_identity")
    config_dict['email_host'] = configs.get("email_server")
    config_dict['email_port'] = configs.get("email_server_port")
    config_dict['email_usr'] = configs.get("email_username")
    config_dict['email_pwd'] = configs.get("email_password")
    config_dict['email_from'] = configs.get("email_from")
    config_dict['email_ssl'] = configs.get("email_ssl")
    config_dict['email_insecure'] = configs.get("email_insecure")
    config_dict['harbor_admin_password'] = configs.get("harbor_admin_password")
    config_dict['auth_mode'] = configs.get("auth_mode")
    config_dict['ldap_url'] = configs.get("ldap_url")

    # LDAP related configs
    # these two options are either both set or both unset
    if configs.get("ldap_searchdn"):
        config_dict['ldap_searchdn'] = configs["ldap_searchdn"]
        config_dict['ldap_search_pwd'] = configs["ldap_search_pwd"]
    else:
        config_dict['ldap_searchdn'] = ""
        config_dict['ldap_search_pwd'] = ""
    config_dict['ldap_basedn'] = configs.get("ldap_basedn")
    # ldap_filter is null by default
    if configs.get("ldap_filter"):
        config_dict['ldap_filter'] = configs["ldap_filter"]
    else:
        config_dict['ldap_filter'] = ""
    config_dict['ldap_uid'] = configs.get("ldap_uid")
    config_dict['ldap_scope'] = configs.get("ldap_scope")
    config_dict['ldap_timeout'] = configs.get("ldap_timeout")
    config_dict['ldap_verify_cert'] = configs.get("ldap_verify_cert")
    config_dict['ldap_group_basedn'] = configs.get("ldap_group_basedn")
    config_dict['ldap_group_filter'] = configs.get("ldap_group_filter")
    config_dict['ldap_group_gid'] = configs.get("ldap_group_gid")
    config_dict['ldap_group_scope'] = configs.get("ldap_group_scope")

    # DB configs
    config_dict['db_password'] = configs.get("db_password")
    config_dict['db_host'] = configs.get("db_host")
    config_dict['db_user'] = configs.get("db_user")
    config_dict['db_port'] = configs.get("db_port")

    config_dict['self_registration'] = configs.get("self_registration")
    config_dict['project_creation_restriction'] = configs.get("project_creation_restriction")

    # secure configs
    if config_dict['protocol'] == "https":
        config_dict['cert_path'] = configs.get("ssl_cert")
        config_dict['cert_key_path'] = configs.get("ssl_cert_key")
    config_dict['customize_crt'] = configs.get("customize_crt")
    config_dict['max_job_workers'] = configs.get("max_job_workers")
    config_dict['token_expiration'] = configs.get("token_expiration")

    config_dict['secretkey_path'] = configs["secretkey_path"]
    # Admiral configs
    if configs.get("admiral_url"):
        config_dict['admiral_url'] = configs["admiral_url"]
    else:
        config_dict['admiral_url'] = ""

    # Clair configs
    config_dict['clair_db_password'] = configs.get("clair_db_password")
    config_dict['clair_db_host'] = configs.get("clair_db_host")
    config_dict['clair_db_port'] = configs.get("clair_db_port")
    config_dict['clair_db_username'] = configs.get("clair_db_username")
    config_dict['clair_db'] = configs.get("clair_db")
    config_dict['clair_updaters_interval'] = configs.get("clair_updaters_interval")

    # UAA configs
    config_dict['uaa_endpoint'] = configs.get("uaa_endpoint")
    config_dict['uaa_clientid'] = configs.get("uaa_clientid")
    config_dict['uaa_clientsecret'] = configs.get("uaa_clientsecret")
    config_dict['uaa_verify_cert'] = configs.get("uaa_verify_cert")
    config_dict['uaa_ca_cert'] = configs.get("uaa_ca_cert")

    # Log configs
    config_dict['log_location'] = configs.get("log_location")
    config_dict['log_rotate_count'] = configs.get("log_rotate_count")
    config_dict['log_rotate_size'] = configs.get("log_rotate_size")

    # Redis configs
    config_dict['redis_host'] = configs.get("redis_host") or ''
    config_dict['redis_port'] = configs.get("redis_port") or ''
    config_dict['redis_password'] = configs.get("redis_password") or ''
    config_dict['redis_db_index'] = configs.get("redis_db_index") or ''

    db_indexes = config_dict['redis_db_index'].split(',')
    config_dict['redis_db_index_reg'] = db_indexes[0]
    config_dict['redis_db_index_js'] = db_indexes[1]
    config_dict['redis_db_index_chart'] = db_indexes[2]

    # redis://[arbitrary_username:password@]ipaddress:port/database_index
    if config_dict.get('redis_password'):
        config_dict['redis_url_js'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
        config_dict['redis_url_reg'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])
    else:
        config_dict['redis_url_js'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
        config_dict['redis_url_reg'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])

    if configs.get("skip_reload_env_pattern"):
        config_dict['skip_reload_env_pattern'] = configs["skip_reload_env_pattern"]
    else:
        config_dict['skip_reload_env_pattern'] = "$^"

    # Registry storage configs
    config_dict['storage_provider_name'] = configs.get("registry_storage_provider_name") or ''
    config_dict['storage_provider_config'] = configs.get("registry_storage_provider_config") or ''

    # yaml requires 1 or more spaces between the key and value
    config_dict['storage_provider_config'] = config_dict['storage_provider_config'].replace(":", ": ", 1)
    config_dict['registry_custom_ca_bundle_path'] = configs.get("registry_custom_ca_bundle") or ''
    config_dict['core_secret'] = generate_random_string(16)
    config_dict['jobservice_secret'] = generate_random_string(16)

    # Admin dn
    config_dict['ldap_group_admin_dn'] = configs["ldap_group_admin_dn"] if configs.get("ldap_group_admin_dn") else ""

    return config_dict
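A minimal sketch of how this parser is meant to be driven; it assumes the module's own imports are available, and the mount path is illustrative (it mirrors the path the Makefile passes to --conf).

# Illustrative sketch only; /harbor_make/harbor.yml is an assumed mount path.
config_dict = parse_yaml_config('/harbor_make/harbor.yml')
print(config_dict['public_url'], config_dict['data_volume'])
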
36  make/photon/prepare/utils/core.py  Normal file
@@ -0,0 +1,36 @@
import shutil, os

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

core_config_dir = os.path.join(config_dir, "core", "certificates")
core_env_template_path = os.path.join(templates_dir, "core", "env.jinja")
core_conf_env = os.path.join(config_dir, "core", "env")
core_conf_template_path = os.path.join(templates_dir, "core", "app.conf.jinja")
core_conf = os.path.join(config_dir, "core", "app.conf")

def prepare_core(config_dict):
    prepare_core_config_dir()
    # Render Core env
    # Cache for the chart repo server: defaults to 'memory'; switch to 'redis' when a Redis host is configured
    chart_cache_driver = "memory"
    if len(config_dict['redis_host']) > 0:
        chart_cache_driver = "redis"

    render_jinja(
        core_env_template_path,
        core_conf_env,
        chart_cache_driver=chart_cache_driver,
        **config_dict)

    # Copy Core app.conf
    copy_core_config(core_conf_template_path, core_conf)

def prepare_core_config_dir():
    prepare_config_dir(core_config_dir)

def copy_core_config(core_templates_path, core_config_path):
    shutil.copyfile(core_templates_path, core_config_path)
    print("Generated configuration file: %s" % core_config_path)
20  make/photon/prepare/utils/db.py  Normal file
@@ -0,0 +1,20 @@
import os

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

db_config_dir = os.path.join(config_dir, "db")
db_env_template_path = os.path.join(templates_dir, "db", "env.jinja")
db_conf_env = os.path.join(config_dir, "db", "env")

def prepare_db(config_dict):
    prepare_db_config_dir()

    render_jinja(
        db_env_template_path,
        db_conf_env,
        db_password=config_dict['db_password'])

def prepare_db_config_dir():
    prepare_config_dir(db_config_dir)
45  make/photon/prepare/utils/docker_compose.py  Normal file
@@ -0,0 +1,45 @@
import os

from g import base_dir, templates_dir
from .jinja import render_jinja


# render docker-compose
VERSION_TAG = 'dev'
REGISTRY_VERSION = 'v2.7.1'
NOTARY_VERSION = 'v0.6.1-v1.7.1'
CLAIR_VERSION = 'v2.0.7-dev'
CHARTMUSEUM_VERSION = 'v0.7.1-dev'
CLAIR_DB_VERSION = VERSION_TAG
MIGRATOR_VERSION = VERSION_TAG
REDIS_VERSION = VERSION_TAG
NGINX_VERSION = VERSION_TAG

docker_compose_template_path = os.path.join(templates_dir, 'docker_compose', 'docker-compose.yml.jinja')
docker_compose_yml_path = os.path.join(base_dir, 'docker-compose.yml')

def check_configs(configs):
    pass

def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
    check_configs(configs)

    rendering_variables = {
        'version': VERSION_TAG,
        'reg_version': "{}-{}".format(REGISTRY_VERSION, VERSION_TAG),
        'redis_version': REDIS_VERSION,
        'notary_version': NOTARY_VERSION,
        'clair_version': CLAIR_VERSION,
        'chartmuseum_version': CHARTMUSEUM_VERSION,
        'data_volume': configs['data_volume'],
        'log_location': configs['log_location'],
        'cert_key_path': configs['cert_key_path'],
        'cert_path': configs['cert_path'],
        'with_notary': with_notary,
        'with_clair': with_clair,
        'with_chartmuseum': with_chartmuseum
    }
    rendering_variables['secretkey_path'] = configs['secretkey_path']

    render_jinja(docker_compose_template_path, docker_compose_yml_path, **rendering_variables)
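A hedged sketch of how the parsed config presumably feeds this renderer; the driver script and import paths below are assumptions, not part of this hunk, and the flag values are illustrative.

# Hypothetical wiring only -- the real entry point lives elsewhere in this commit:
# from utils.configs import parse_yaml_config          (assumed module name)
# from utils.docker_compose import prepare_docker_compose
config_dict = parse_yaml_config('/harbor_make/harbor.yml')
prepare_docker_compose(config_dict, with_clair=True, with_notary=False, with_chartmuseum=True)
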
12  make/photon/prepare/utils/jinja.py  Normal file
@@ -0,0 +1,12 @@
from jinja2 import Environment, FileSystemLoader, select_autoescape
from g import templates_dir
from .misc import mark_file

jinja_env = Environment(loader=FileSystemLoader('/'), trim_blocks=True)

def render_jinja(src, dest, mode=0o640, uid=0, gid=0, **kw):
    t = jinja_env.get_template(src)
    with open(dest, 'w') as f:
        f.write(t.render(**kw))
    mark_file(dest, mode, uid, gid)
    print("Generated configuration file: %s" % dest)
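Because the loader is rooted at '/', render_jinja accepts absolute template paths; a small illustrative use, assuming this package's imports are available and using hypothetical /tmp paths:

# Hypothetical example: render a one-line template to /tmp/env
with open('/tmp/env.jinja', 'w') as f:
    f.write('HOSTNAME={{ hostname }}\n')
render_jinja('/tmp/env.jinja', '/tmp/env', hostname='harbor.local')
# /tmp/env now contains: HOSTNAME=harbor.local
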
34  make/photon/prepare/utils/jobservice.py  Normal file
@@ -0,0 +1,34 @@
import os

from g import config_dir, DEFAULT_GID, DEFAULT_UID, templates_dir
from utils.misc import prepare_config_dir, mark_file
from utils.jinja import render_jinja

job_config_dir = os.path.join(config_dir, "jobservice")
job_service_env_template_path = os.path.join(templates_dir, "jobservice", "env.jinja")
job_service_conf_env = os.path.join(config_dir, "jobservice", "env")
job_service_conf_template_path = os.path.join(templates_dir, "jobservice", "config.yml.jinja")
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")


def prepare_job_service(config_dict):
    prepare_config_dir(job_config_dir)

    # Job log is stored in the data dir
    job_log_dir = os.path.join('/data', "job_logs")
    prepare_config_dir(job_log_dir)

    # Render Jobservice env
    render_jinja(
        job_service_env_template_path,
        job_service_conf_env,
        **config_dict)

    # Render Jobservice config
    render_jinja(
        job_service_conf_template_path,
        jobservice_conf,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        max_job_workers=config_dict['max_job_workers'],
        redis_url=config_dict['redis_url_js'])
20  make/photon/prepare/utils/log.py  Normal file
@@ -0,0 +1,20 @@
import os

from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

log_config_dir = os.path.join(config_dir, "log")
logrotate_template_path = os.path.join(templates_dir, "log", "logrotate.conf.jinja")
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")

def prepare_log_configs(config_dict):
    prepare_config_dir(log_config_dir)

    # Render Log config
    render_jinja(
        logrotate_template_path,
        log_rotate_config,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        **config_dict)
106  make/photon/prepare/utils/misc.py  Normal file
@@ -0,0 +1,106 @@
import os
import string
import random

from g import DEFAULT_UID, DEFAULT_GID


# To meet security requirement
# By default it will change file mode to 0600, and make the owner of the file to 10000:10000
def mark_file(path, mode=0o600, uid=DEFAULT_UID, gid=DEFAULT_GID):
    # if mode > 0:
    #     os.chmod(path, mode)
    # if uid > 0 and gid > 0:
    #     os.chown(path, uid, gid)
    pass


def validate(conf, **kwargs):
    # Protocol validate
    protocol = conf.get("configuration", "ui_url_protocol")
    if protocol != "https" and kwargs.get('notary_mode'):
        raise Exception(
            "Error: the protocol must be https when Harbor is deployed with Notary")
    if protocol == "https":
        if not conf.has_option("configuration", "ssl_cert"):
            raise Exception(
                "Error: The protocol is https but attribute ssl_cert is not set")
        cert_path = conf.get("configuration", "ssl_cert")
        if not os.path.isfile(cert_path):
            raise Exception(
                "Error: The path for certificate: %s is invalid" % cert_path)
        if not conf.has_option("configuration", "ssl_cert_key"):
            raise Exception(
                "Error: The protocol is https but attribute ssl_cert_key is not set")
        cert_key_path = conf.get("configuration", "ssl_cert_key")
        if not os.path.isfile(cert_key_path):
            raise Exception(
                "Error: The path for certificate key: %s is invalid" % cert_key_path)

    # Project validate
    project_creation = conf.get(
        "configuration", "project_creation_restriction")
    if project_creation != "everyone" and project_creation != "adminonly":
        raise Exception(
            "Error invalid value for project_creation_restriction: %s" % project_creation)

    # Storage validate
    valid_storage_drivers = ["filesystem",
                             "azure", "gcs", "s3", "swift", "oss"]
    storage_provider_name = conf.get(
        "configuration", "registry_storage_provider_name").strip()
    if storage_provider_name not in valid_storage_drivers:
        raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (
            storage_provider_name, ",".join(valid_storage_drivers)))

    storage_provider_config = conf.get(
        "configuration", "registry_storage_provider_config").strip()
    if storage_provider_name != "filesystem":
        if storage_provider_config == "":
            raise Exception(
                "Error: no provider configurations are provided for provider %s" % storage_provider_name)

    # Redis validate
    redis_host = conf.get("configuration", "redis_host")
    if redis_host is None or len(redis_host) < 1:
        raise Exception(
            "Error: redis_host in harbor.cfg needs to point to an endpoint of Redis server or cluster.")

    redis_port = conf.get("configuration", "redis_port")
    if len(redis_port) < 1:
        raise Exception(
            "Error: redis_port in harbor.cfg needs to point to the port of Redis server or cluster.")

    redis_db_index = conf.get("configuration", "redis_db_index").strip()
    if len(redis_db_index.split(",")) != 3:
        raise Exception(
            "Error invalid value for redis_db_index: %s. please set it as 1,2,3" % redis_db_index)

def validate_crt_subj(dirty_subj):
    subj_list = [item for item in dirty_subj.strip().split("/") \
        if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0]
    return "/" + "/".join(subj_list)


def generate_random_string(length):
    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))


def prepare_config_dir(root, *name):
    absolute_path = os.path.join(root, *name)
    if not os.path.exists(absolute_path):
        os.makedirs(absolute_path)
    return absolute_path


def delfile(src):
    if os.path.isfile(src):
        try:
            os.remove(src)
            print("Clearing the configuration file: %s" % src)
        except:
            pass
    elif os.path.isdir(src):
        for item in os.listdir(src):
            itemsrc = os.path.join(src, item)
            delfile(itemsrc)
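To make the subject-filtering behaviour of validate_crt_subj concrete, a small illustrative check with a hypothetical subject string:

# Empty components such as "ST=" are dropped:
assert validate_crt_subj("/C=US/ST=/O=Harbor/") == "/C=US/O=Harbor"
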
52  make/photon/prepare/utils/nginx.py  Normal file
@@ -0,0 +1,52 @@
import os, shutil
from fnmatch import fnmatch

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir, mark_file
from utils.jinja import render_jinja
from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH

nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d")
nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja")
nginx_http_conf_template = os.path.join(templates_dir, "nginx", "nginx.http.conf.jinja")
nginx_template_ext_dir = os.path.join(templates_dir, 'nginx', 'ext')

CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS = 'harbor.https.*.conf'
CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP = 'harbor.http.*.conf'

def prepare_nginx(config_dict):
    prepare_config_dir(nginx_confd_dir)
    render_nginx_template(config_dict)

def render_nginx_template(config_dict):
    if config_dict['protocol'] == "https":
        render_jinja(nginx_https_conf_template, nginx_conf,
                     ssl_cert=SSL_CERT_PATH,
                     ssl_cert_key=SSL_CERT_KEY_PATH)
        location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
    else:
        render_jinja(nginx_http_conf_template, nginx_conf)
        location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
    copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)

def add_additional_location_config(src, dst):
    """
    These conf files are used by users who want to add customized locations to the Harbor proxy
    :params src: source of the file
    :params dst: destination file path
    """
    if not os.path.isfile(src):
        return
    print("Copying nginx configuration file {src} to {dst}".format(
        src=src, dst=dst))
    shutil.copy2(src, dst)
    mark_file(dst, mode=0o644)

def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern):
    if not os.path.exists(src_config_dir):
        return
    # Note: map() is lazy under Python 3, so iterate explicitly to make sure the copies actually run
    for filename in os.listdir(src_config_dir):
        if fnmatch(filename, filename_pattern):
            add_additional_location_config(
                os.path.join(src_config_dir, filename),
                os.path.join(dst_config_dir, filename))
107  make/photon/prepare/utils/notary.py  Normal file
@@ -0,0 +1,107 @@
import os, shutil
from g import base_dir, templates_dir, config_dir, root_crt, DEFAULT_UID, DEFAULT_GID
from .cert import openssl_installed, create_cert, create_root_cert, get_alias
from .jinja import render_jinja
from .misc import mark_file, prepare_config_dir

notary_template_dir = os.path.join(templates_dir, "notary")
notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja")
notary_server_pg_template = os.path.join(notary_template_dir, "server-config.postgres.json.jinja")
notary_server_nginx_config_template = os.path.join(templates_dir, "nginx", "notary.server.conf.jinja")
notary_signer_env_template = os.path.join(notary_template_dir, "signer_env.jinja")
notary_server_env_template = os.path.join(notary_template_dir, "server_env.jinja")

notary_config_dir = os.path.join(config_dir, 'notary')
notary_signer_pg_config = os.path.join(notary_config_dir, "signer-config.postgres.json")
notary_server_pg_config = os.path.join(notary_config_dir, "server-config.postgres.json")
notary_server_config_path = os.path.join(notary_config_dir, 'notary.server.conf')
notary_signer_env_path = os.path.join(notary_config_dir, "signer_env")
notary_server_env_path = os.path.join(notary_config_dir, "server_env")


def prepare_env_notary(customize_crt, nginx_config_dir):
    notary_config_dir = prepare_config_dir(config_dir, "notary")
    if (customize_crt == 'on' or customize_crt == True) and openssl_installed():
        try:
            temp_cert_dir = os.path.join(base_dir, "cert_tmp")
            if not os.path.exists(temp_cert_dir):
                os.makedirs(temp_cert_dir)
            ca_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=Self-signed by GoHarbor"
            cert_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=notarysigner"
            signer_ca_cert = os.path.join(temp_cert_dir, "notary-signer-ca.crt")
            signer_ca_key = os.path.join(temp_cert_dir, "notary-signer-ca.key")
            signer_cert_path = os.path.join(temp_cert_dir, "notary-signer.crt")
            signer_key_path = os.path.join(temp_cert_dir, "notary-signer.key")
            create_root_cert(ca_subj, key_path=signer_ca_key, cert_path=signer_ca_cert)
            create_cert(cert_subj, signer_ca_key, signer_ca_cert, key_path=signer_key_path, cert_path=signer_cert_path)
            print("Copying certs for notary signer")
            shutil.copy2(signer_cert_path, notary_config_dir)
            shutil.copy2(signer_key_path, notary_config_dir)
            shutil.copy2(signer_ca_cert, notary_config_dir)
        finally:
            srl_tmp = os.path.join(os.getcwd(), ".srl")
            if os.path.isfile(srl_tmp):
                os.remove(srl_tmp)
            if os.path.isdir(temp_cert_dir):
                shutil.rmtree(temp_cert_dir, True)
    else:
        print("Copying certs for notary signer")
        shutil.copy2(os.path.join(notary_template_dir, "notary-signer.crt"), notary_config_dir)
        shutil.copy2(os.path.join(notary_template_dir, "notary-signer.key"), notary_config_dir)
        shutil.copy2(os.path.join(notary_template_dir, "notary-signer-ca.crt"), notary_config_dir)

    shutil.copy2(root_crt, notary_config_dir)
    shutil.copy2(
        os.path.join(notary_template_dir, "server_env.jinja"),
        os.path.join(notary_config_dir, "server_env"))

    print("Copying nginx configuration file for notary")
    notary_nginx_upstream_template_conf = os.path.join(templates_dir, "nginx", "notary.upstream.conf.jinja")
    notary_server_nginx_config = os.path.join(nginx_config_dir, "notary.server.conf")
    shutil.copy2(notary_nginx_upstream_template_conf, notary_server_nginx_config)

    mark_file(os.path.join(notary_config_dir, "notary-signer.crt"))
    mark_file(os.path.join(notary_config_dir, "notary-signer.key"))
    mark_file(os.path.join(notary_config_dir, "notary-signer-ca.crt"))
    mark_file(os.path.join(notary_config_dir, "root.crt"))

    # print("Copying sql file for notary DB")
    # if os.path.exists(os.path.join(notary_config_dir, "postgresql-initdb.d")):
    #     shutil.rmtree(os.path.join(notary_config_dir, "postgresql-initdb.d"))
    # shutil.copytree(os.path.join(notary_temp_dir, "postgresql-initdb.d"), os.path.join(notary_config_dir, "postgresql-initdb.d"))


def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_path):

    prepare_env_notary(config_dict['customize_crt'], nginx_config_dir)

    render_jinja(
        notary_signer_pg_template,
        notary_signer_pg_config,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID
    )

    render_jinja(
        notary_server_pg_template,
        notary_server_pg_config,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        token_endpoint=config_dict['public_url'])

    render_jinja(
        notary_server_nginx_config_template,
        os.path.join(nginx_config_dir, "notary.server.conf"),
        ssl_cert=ssl_cert_path,
        ssl_cert_key=ssl_cert_key_path)

    default_alias = get_alias(config_dict['secretkey_path'])
    render_jinja(
        notary_signer_env_template,
        notary_signer_env_path,
        alias=default_alias)

    render_jinja(
        notary_server_env_template,
        notary_server_env_path
    )
0  make/photon/prepare/utils/proxy.py  Normal file
51  make/photon/prepare/utils/registry.py  Normal file
@@ -0,0 +1,51 @@
import os, shutil

from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja


registry_config_dir = os.path.join(config_dir, "registry")
registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
registry_conf = os.path.join(config_dir, "registry", "config.yml")


def prepare_registry(config_dict):
    prepare_registry_config_dir()

    storage_provider_info = get_storage_provider_info(
        config_dict['storage_provider_name'],
        config_dict['storage_provider_config'],
        registry_config_dir)

    render_jinja(
        registry_config_template_path,
        registry_conf,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        storage_provider_info=storage_provider_info,
        **config_dict)

def prepare_registry_config_dir():
    prepare_config_dir(registry_config_dir)

def get_storage_provider_info(provider_name, provider_config, registry_config_dir_path):
    # start from the user-supplied provider config so the name is always bound
    storage_provider_config = provider_config
    if provider_name == "filesystem":
        if not storage_provider_config:
            storage_provider_config = "rootdirectory: /storage"
        elif "rootdirectory:" not in storage_provider_config:
            storage_provider_config = "rootdirectory: /storage" + "," + storage_provider_config
    # generate storage configuration section in yaml format
    storage_provider_conf_list = [provider_name + ':']
    for c in storage_provider_config.split(","):
        kvs = c.split(": ")
        if len(kvs) == 2:
            if kvs[0].strip() == "keyfile":
                srcKeyFile = kvs[1].strip()
                if os.path.isfile(srcKeyFile):
                    shutil.copyfile(srcKeyFile, os.path.join(registry_config_dir_path, "gcs.key"))
                    storage_provider_conf_list.append("keyfile: %s" % "/etc/registry/gcs.key")
                    continue
        storage_provider_conf_list.append(c.strip())
    storage_provider_info = ('\n' + ' ' * 4).join(storage_provider_conf_list)
    return storage_provider_info
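For illustration, with the filesystem driver and an empty provider config the helper above yields the indented YAML fragment the registry template expects; the call below is hypothetical.

# Hypothetical check of the generated storage section:
info = get_storage_provider_info("filesystem", "", "/tmp")
assert info == "filesystem:\n    rootdirectory: /storage"
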
30  make/photon/prepare/utils/registry_ctl.py  Normal file
@@ -0,0 +1,30 @@
import os, shutil

from g import config_dir, templates_dir
from utils.misc import prepare_config_dir
from utils.jinja import render_jinja

registryctl_config_dir = os.path.join(config_dir, "registryctl")
registryctl_config_template_path = os.path.join(templates_dir, "registryctl", "config.yml.jinja")
registryctl_conf = os.path.join(config_dir, "registryctl", "config.yml")
registryctl_env_template_path = os.path.join(templates_dir, "registryctl", "env.jinja")
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")

def prepare_registry_ctl(config_dict):
    # prepare dir
    prepare_registry_ctl_config_dir()

    # Render Registryctl env
    render_jinja(
        registryctl_env_template_path,
        registryctl_conf_env,
        **config_dict)

    # Copy Registryctl config
    copy_registry_ctl_conf(registryctl_config_template_path, registryctl_conf)

def prepare_registry_ctl_config_dir():
    prepare_config_dir(registryctl_config_dir)

def copy_registry_ctl_conf(src, dst):
    shutil.copyfile(src, dst)
11  make/photon/prepare/utils/uaa.py  Normal file
@@ -0,0 +1,11 @@
import os, shutil

def prepare_uaa_cert_file(uaa_ca_cert, core_cert_dir):
    if os.path.isfile(uaa_ca_cert):
        if not os.path.isdir(core_cert_dir):
            os.makedirs(core_cert_dir)
        core_uaa_ca = os.path.join(core_cert_dir, "uaa_ca.pem")
        print("Copying UAA CA cert to %s" % core_uaa_ca)
        shutil.copyfile(uaa_ca_cert, core_uaa_ca)
    else:
        print("Can not find UAA CA cert: %s, skip" % uaa_ca_cert)
754  make/prepare
@@ -1,750 +1,10 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import print_function, unicode_literals # We require Python 2.6 or later
|
||||
from string import Template
|
||||
import random
|
||||
import os
|
||||
from fnmatch import fnmatch
|
||||
import sys
|
||||
import string
|
||||
import argparse
|
||||
import subprocess
|
||||
import shutil
|
||||
from io import open
|
||||
#!/bin/bash
|
||||
|
||||
if sys.version_info[:3][0] == 2:
|
||||
import ConfigParser as ConfigParser
|
||||
import StringIO as StringIO
|
||||
host_make_path="$( cd "$(dirname "$0")" ; pwd -P )"
|
||||
|
||||
if sys.version_info[:3][0] == 3:
|
||||
import configparser as ConfigParser
|
||||
import io as StringIO
|
||||
echo host make path is set to ${host_make_path}
|
||||
data_path=$(grep '^[^#]*data_volume' ${host_make_path}/harbor.yml | awk '{print $NF}')
|
||||
log_path=$(grep '^[^#]*log_location' ${host_make_path}/harbor.yml | awk '{print $NF}')
|
||||
secretkey_path=$(grep '^[^#]*secretkey_path' ${host_make_path}/harbor.yml | awk '{print $NF}')
|
||||
|
||||
DATA_VOL = "/data"
|
||||
DEFAULT_UID = 10000
|
||||
DEFAULT_GID = 10000
|
||||
|
||||
base_dir = os.path.dirname(__file__)
|
||||
config_dir = os.path.join(base_dir, "common/config")
|
||||
templates_dir = os.path.join(base_dir, "common/templates")
|
||||
|
||||
custom_nginx_location_file_pattern = 'harbor.https.*.conf'
|
||||
|
||||
def validate(conf, args):
|
||||
|
||||
protocol = rcp.get("configuration", "ui_url_protocol")
|
||||
if protocol != "https" and args.notary_mode:
|
||||
raise Exception("Error: the protocol must be https when Harbor is deployed with Notary")
|
||||
if protocol == "https":
|
||||
if not rcp.has_option("configuration", "ssl_cert"):
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert is not set")
|
||||
cert_path = rcp.get("configuration", "ssl_cert")
|
||||
if not os.path.isfile(cert_path):
|
||||
raise Exception("Error: The path for certificate: %s is invalid" % cert_path)
|
||||
if not rcp.has_option("configuration", "ssl_cert_key"):
|
||||
raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
|
||||
cert_key_path = rcp.get("configuration", "ssl_cert_key")
|
||||
if not os.path.isfile(cert_key_path):
|
||||
raise Exception("Error: The path for certificate key: %s is invalid" % cert_key_path)
|
||||
project_creation = rcp.get("configuration", "project_creation_restriction")
|
||||
|
||||
if project_creation != "everyone" and project_creation != "adminonly":
|
||||
raise Exception("Error invalid value for project_creation_restriction: %s" % project_creation)
|
||||
|
||||
valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
|
||||
storage_provider_name = rcp.get("configuration", "registry_storage_provider_name").strip()
|
||||
if storage_provider_name not in valid_storage_drivers:
|
||||
raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (storage_provider_name, ",".join(valid_storage_drivers)))
|
||||
|
||||
storage_provider_config = rcp.get("configuration", "registry_storage_provider_config").strip()
|
||||
if storage_provider_name != "filesystem":
|
||||
if storage_provider_config == "":
|
||||
raise Exception("Error: no provider configurations are provided for provider %s" % storage_provider_name)
|
||||
|
||||
redis_host = rcp.get("configuration", "redis_host")
|
||||
if redis_host is None or len(redis_host) < 1:
|
||||
raise Exception("Error: redis_host in harbor.cfg needs to point to an endpoint of Redis server or cluster.")
|
||||
|
||||
redis_port = rcp.get("configuration", "redis_port")
|
||||
if len(redis_port) < 1:
|
||||
raise Exception("Error: redis_port in harbor.cfg needs to point to the port of Redis server or cluster.")
|
||||
|
||||
redis_db_index = rcp.get("configuration", "redis_db_index").strip()
|
||||
if len(redis_db_index.split(",")) != 3:
|
||||
raise Exception("Error invalid value for redis_db_index: %s. please set it as 1,2,3" % redis_db_index)
|
||||
|
||||
#To meet security requirement
|
||||
#By default it will change file mode to 0600, and make the owner of the file to 10000:10000
|
||||
def mark_file(path, mode=0o600, uid=DEFAULT_UID, gid=DEFAULT_GID):
|
||||
if mode > 0:
|
||||
os.chmod(path, mode)
|
||||
if uid > 0 and gid > 0:
|
||||
os.chown(path, uid, gid)
|
||||
|
||||
def get_secret_key(path):
|
||||
secret_key = _get_secret(path, "secretkey")
|
||||
if len(secret_key) != 16:
|
||||
raise Exception("secret key's length has to be 16 chars, current length: %d" % len(secret_key))
|
||||
return secret_key
|
||||
|
||||
def get_alias(path):
|
||||
alias = _get_secret(path, "defaultalias", length=8)
|
||||
return alias
|
||||
|
||||
def _get_secret(folder, filename, length=16):
|
||||
key_file = os.path.join(folder, filename)
|
||||
if os.path.isfile(key_file):
|
||||
with open(key_file, 'r') as f:
|
||||
key = f.read()
|
||||
print("loaded secret from file: %s" % key_file)
|
||||
mark_file(key_file)
|
||||
return key
|
||||
if not os.path.isdir(folder):
|
||||
os.makedirs(folder)
|
||||
key = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(length))
|
||||
with open(key_file, 'w') as f:
|
||||
f.write(key)
|
||||
print("Generated and saved secret to file: %s" % key_file)
|
||||
mark_file(key_file)
|
||||
return key
|
||||
|
||||
def prep_conf_dir(root, *name):
|
||||
absolute_path = os.path.join(root, *name)
|
||||
if not os.path.exists(absolute_path):
|
||||
os.makedirs(absolute_path)
|
||||
return absolute_path
|
||||
|
||||
def render(src, dest, mode=0o640, uid=0, gid=0, **kw):
|
||||
t = Template(open(src, 'r').read())
|
||||
with open(dest, 'w') as f:
|
||||
f.write(t.substitute(**kw))
|
||||
mark_file(dest, mode, uid, gid)
|
||||
print("Generated configuration file: %s" % dest)
|
||||
|
||||
def delfile(src):
|
||||
if os.path.isfile(src):
|
||||
try:
|
||||
os.remove(src)
|
||||
print("Clearing the configuration file: %s" % src)
|
||||
except:
|
||||
pass
|
||||
elif os.path.isdir(src):
|
||||
for item in os.listdir(src):
|
||||
itemsrc=os.path.join(src,item)
|
||||
delfile(itemsrc)
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--conf', dest='cfgfile', default=base_dir+'/harbor.cfg',type=str,help="the path of Harbor configuration file")
|
||||
parser.add_argument('--with-notary', dest='notary_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with notary")
|
||||
parser.add_argument('--with-clair', dest='clair_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with clair")
|
||||
parser.add_argument('--with-chartmuseum', dest='chart_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with chart repository supporting")
|
||||
args = parser.parse_args()
|
||||
|
||||
delfile(config_dir)
|
||||
#Read configurations
|
||||
conf = StringIO.StringIO()
|
||||
conf.write("[configuration]\n")
|
||||
conf.write(open(args.cfgfile).read())
|
||||
conf.seek(0, os.SEEK_SET)
|
||||
rcp = ConfigParser.RawConfigParser()
|
||||
rcp.readfp(conf)
|
||||
validate(rcp, args)
|
||||
|
||||
reload_config = rcp.get("configuration", "reload_config") if rcp.has_option(
|
||||
"configuration", "reload_config") else "false"
|
||||
hostname = rcp.get("configuration", "hostname")
|
||||
protocol = rcp.get("configuration", "ui_url_protocol")
|
||||
public_url = protocol + "://" + hostname
|
||||
email_identity = rcp.get("configuration", "email_identity")
|
||||
email_host = rcp.get("configuration", "email_server")
|
||||
email_port = rcp.get("configuration", "email_server_port")
|
||||
email_usr = rcp.get("configuration", "email_username")
|
||||
email_pwd = rcp.get("configuration", "email_password")
|
||||
email_from = rcp.get("configuration", "email_from")
|
||||
email_ssl = rcp.get("configuration", "email_ssl")
|
||||
email_insecure = rcp.get("configuration", "email_insecure")
|
||||
harbor_admin_password = rcp.get("configuration", "harbor_admin_password")
|
||||
auth_mode = rcp.get("configuration", "auth_mode")
|
||||
ldap_url = rcp.get("configuration", "ldap_url")
|
||||
# this two options are either both set or unset
|
||||
if rcp.has_option("configuration", "ldap_searchdn"):
|
||||
ldap_searchdn = rcp.get("configuration", "ldap_searchdn")
|
||||
ldap_search_pwd = rcp.get("configuration", "ldap_search_pwd")
|
||||
else:
|
||||
ldap_searchdn = ""
|
||||
ldap_search_pwd = ""
|
||||
ldap_basedn = rcp.get("configuration", "ldap_basedn")
|
||||
# ldap_filter is null by default
|
||||
if rcp.has_option("configuration", "ldap_filter"):
|
||||
ldap_filter = rcp.get("configuration", "ldap_filter")
|
||||
else:
|
||||
ldap_filter = ""
|
||||
ldap_uid = rcp.get("configuration", "ldap_uid")
|
||||
ldap_scope = rcp.get("configuration", "ldap_scope")
|
||||
ldap_timeout = rcp.get("configuration", "ldap_timeout")
|
||||
ldap_verify_cert = rcp.get("configuration", "ldap_verify_cert")
|
||||
ldap_group_basedn = rcp.get("configuration", "ldap_group_basedn")
|
||||
ldap_group_filter = rcp.get("configuration", "ldap_group_filter")
|
||||
ldap_group_gid = rcp.get("configuration", "ldap_group_gid")
|
||||
ldap_group_scope = rcp.get("configuration", "ldap_group_scope")
|
||||
db_password = rcp.get("configuration", "db_password")
|
||||
db_host = rcp.get("configuration", "db_host")
|
||||
db_user = rcp.get("configuration", "db_user")
|
||||
db_port = rcp.get("configuration", "db_port")
|
||||
self_registration = rcp.get("configuration", "self_registration")
|
||||
if protocol == "https":
|
||||
cert_path = rcp.get("configuration", "ssl_cert")
|
||||
cert_key_path = rcp.get("configuration", "ssl_cert_key")
|
||||
customize_crt = rcp.get("configuration", "customize_crt")
|
||||
max_job_workers = rcp.get("configuration", "max_job_workers")
|
||||
token_expiration = rcp.get("configuration", "token_expiration")
|
||||
proj_cre_restriction = rcp.get("configuration", "project_creation_restriction")
|
||||
secretkey_path = rcp.get("configuration", "secretkey_path")
|
||||
if rcp.has_option("configuration", "admiral_url"):
|
||||
admiral_url = rcp.get("configuration", "admiral_url")
|
||||
else:
|
||||
admiral_url = ""
|
||||
clair_db_password = rcp.get("configuration", "clair_db_password")
|
||||
clair_db_host = rcp.get("configuration", "clair_db_host")
|
||||
clair_db_port = rcp.get("configuration", "clair_db_port")
|
||||
clair_db_username = rcp.get("configuration", "clair_db_username")
|
||||
clair_db = rcp.get("configuration", "clair_db")
|
||||
clair_updaters_interval = rcp.get("configuration", "clair_updaters_interval")
|
||||
|
||||
uaa_endpoint = rcp.get("configuration", "uaa_endpoint")
|
||||
uaa_clientid = rcp.get("configuration", "uaa_clientid")
|
||||
uaa_clientsecret = rcp.get("configuration", "uaa_clientsecret")
|
||||
uaa_verify_cert = rcp.get("configuration", "uaa_verify_cert")
|
||||
uaa_ca_cert = rcp.get("configuration", "uaa_ca_cert")
|
||||
|
||||
secret_key = get_secret_key(secretkey_path)
|
||||
log_rotate_count = rcp.get("configuration", "log_rotate_count")
|
||||
log_rotate_size = rcp.get("configuration", "log_rotate_size")
|
||||
|
||||
redis_host = rcp.get("configuration", "redis_host")
|
||||
redis_port = rcp.get("configuration", "redis_port")
|
||||
redis_password = rcp.get("configuration", "redis_password")
|
||||
redis_db_index = rcp.get("configuration", "redis_db_index")
|
||||
|
||||
db_indexs = redis_db_index.split(',')
|
||||
redis_db_index_reg = db_indexs[0]
|
||||
redis_db_index_js = db_indexs[1]
|
||||
redis_db_index_chart = db_indexs[2]
|
||||
|
||||
#redis://[arbitrary_username:password@]ipaddress:port/database_index
|
||||
redis_url_js = ''
|
||||
redis_url_reg = ''
|
||||
if len(redis_password) > 0:
|
||||
redis_url_js = "redis://anonymous:%s@%s:%s/%s" % (redis_password, redis_host, redis_port, redis_db_index_js)
|
||||
redis_url_reg = "redis://anonymous:%s@%s:%s/%s" % (redis_password, redis_host, redis_port, redis_db_index_reg)
|
||||
else:
|
||||
redis_url_js = "redis://%s:%s/%s" % (redis_host, redis_port, redis_db_index_js)
|
||||
redis_url_reg = "redis://%s:%s/%s" % (redis_host, redis_port, redis_db_index_reg)
|
||||
|
||||
if rcp.has_option("configuration", "skip_reload_env_pattern"):
|
||||
skip_reload_env_pattern = rcp.get("configuration", "skip_reload_env_pattern")
|
||||
else:
|
||||
skip_reload_env_pattern = "$^"
|
||||
storage_provider_name = rcp.get("configuration", "registry_storage_provider_name").strip()
|
||||
storage_provider_config = rcp.get("configuration", "registry_storage_provider_config").strip()
|
||||
# yaml requires 1 or more spaces between the key and value
|
||||
storage_provider_config = storage_provider_config.replace(":", ": ", 1)
|
||||
registry_custom_ca_bundle_path = rcp.get("configuration", "registry_custom_ca_bundle").strip()
|
||||
core_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
jobservice_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
|
||||
core_config_dir = prep_conf_dir(config_dir,"core")
|
||||
core_certificates_dir = prep_conf_dir(core_config_dir,"certificates")
|
||||
db_config_dir = prep_conf_dir(config_dir, "db")
|
||||
job_config_dir = prep_conf_dir(config_dir, "jobservice")
|
||||
registry_config_dir = prep_conf_dir(config_dir, "registry")
|
||||
registryctl_config_dir = prep_conf_dir(config_dir, "registryctl")
|
||||
nginx_config_dir = prep_conf_dir (config_dir, "nginx")
|
||||
nginx_conf_d = prep_conf_dir(nginx_config_dir, "conf.d")
|
||||
log_config_dir = prep_conf_dir (config_dir, "log")
|
||||
|
||||
conf_env = os.path.join(config_dir, "core", "config_env")
|
||||
core_conf_env = os.path.join(config_dir, "core", "env")
|
||||
core_conf = os.path.join(config_dir, "core", "app.conf")
|
||||
core_cert_dir = os.path.join(config_dir, "core", "certificates")
|
||||
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
|
||||
registry_conf = os.path.join(config_dir, "registry", "config.yml")
|
||||
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")
|
||||
registryctl_conf_yml = os.path.join(config_dir, "registryctl", "config.yml")
|
||||
db_conf_env = os.path.join(config_dir, "db", "env")
|
||||
job_conf_env = os.path.join(config_dir, "jobservice", "env")
|
||||
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
|
||||
cert_dir = os.path.join(config_dir, "nginx", "cert")
|
||||
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
|
||||
registry_url = "http://registry:5000"
|
||||
registry_controller_url = "http://registryctl:8080"
|
||||
core_url = "http://core:8080"
|
||||
token_service_url = "http://core:8080/service/token"
|
||||
|
||||
jobservice_url = "http://jobservice:8080"
|
||||
clair_url = "http://clair:6060"
|
||||
notary_url = "http://notary-server:4443"
|
||||
chart_repository_url = "http://chartmuseum:9999"
|
||||
|
||||
if len(admiral_url) != 0 and admiral_url != "NA":
|
||||
#VIC overwrites the data volume path, which by default should be same as the value of secretkey_path
|
||||
DATA_VOL = secretkey_path
|
||||
JOB_LOG_DIR = os.path.join(DATA_VOL, "job_logs")
|
||||
if not os.path.exists(JOB_LOG_DIR):
|
||||
os.makedirs(JOB_LOG_DIR)
|
||||
mark_file(JOB_LOG_DIR, mode=0o755)
|
||||
|
||||
if protocol == "https":
|
||||
target_cert_path = os.path.join(cert_dir, os.path.basename(cert_path))
|
||||
if not os.path.exists(cert_dir):
|
||||
os.makedirs(cert_dir)
|
||||
shutil.copy2(cert_path,target_cert_path)
|
||||
target_cert_key_path = os.path.join(cert_dir, os.path.basename(cert_key_path))
|
||||
shutil.copy2(cert_key_path,target_cert_key_path)
|
||||
render(os.path.join(templates_dir, "nginx", "nginx.https.conf"),
|
||||
nginx_conf,
|
||||
ssl_cert = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_path)),
|
||||
ssl_cert_key = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_key_path)))
|
||||
else:
|
||||
render(os.path.join(templates_dir, "nginx", "nginx.http.conf"), nginx_conf)
|
||||
custom_nginx_location_file_pattern = 'harbor.http.*.conf'
|
||||
|
||||
def add_additional_location_config(src, dst):
|
||||
"""
|
||||
This conf file is used by users who want to add customized locations to the Harbor proxy
|
||||
:params src: source of the file
|
||||
:params dst: destination file path
|
||||
"""
|
||||
if not os.path.isfile(src):
|
||||
return
|
||||
print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
|
||||
shutil.copy2(src, dst)
|
||||
mark_file(dst)
|
||||
|
||||
nginx_template_ext_dir = os.path.join(templates_dir, 'nginx', 'ext')
|
||||
if os.path.exists(nginx_template_ext_dir):
|
||||
map(lambda filename: add_additional_location_config(
|
||||
os.path.join(nginx_template_ext_dir, filename),
|
||||
os.path.join(nginx_conf_d, filename)),
|
||||
[fname for fname in os.listdir(nginx_template_ext_dir) if fnmatch(fname, custom_nginx_location_file_pattern)])
|
||||
|
||||
#Use reload_key to avoid reload config after restart harbor
|
||||
reload_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) if reload_config == "true" else ""
|
||||
|
||||
ldap_group_admin_dn = rcp.get("configuration", "ldap_group_admin_dn") if rcp.has_option("configuration", "ldap_group_admin_dn") else ""
|
||||
|
||||
render(os.path.join(templates_dir, "core", "config_env"),
|
||||
conf_env,
|
||||
reload_config=reload_config,
|
||||
public_url=public_url,
|
||||
core_url=core_url,
|
||||
auth_mode=auth_mode,
|
||||
self_registration=self_registration,
|
||||
ldap_url=ldap_url,
|
||||
ldap_searchdn =ldap_searchdn,
|
||||
ldap_search_pwd =ldap_search_pwd,
|
||||
ldap_basedn=ldap_basedn,
|
||||
ldap_filter=ldap_filter,
|
||||
ldap_uid=ldap_uid,
|
||||
ldap_scope=ldap_scope,
|
||||
ldap_verify_cert=ldap_verify_cert,
|
||||
ldap_timeout=ldap_timeout,
|
||||
ldap_group_basedn=ldap_group_basedn,
|
||||
ldap_group_filter=ldap_group_filter,
|
||||
ldap_group_gid=ldap_group_gid,
|
||||
ldap_group_scope=ldap_group_scope,
|
||||
ldap_group_admin_dn=ldap_group_admin_dn,
|
||||
db_password=db_password,
|
||||
db_host=db_host,
|
||||
db_user=db_user,
|
||||
db_port=db_port,
|
||||
email_host=email_host,
|
||||
email_port=email_port,
|
||||
email_usr=email_usr,
|
||||
email_pwd=email_pwd,
|
||||
email_ssl=email_ssl,
|
||||
email_insecure=email_insecure,
|
||||
email_from=email_from,
|
||||
email_identity=email_identity,
|
||||
harbor_admin_password=harbor_admin_password,
|
||||
project_creation_restriction=proj_cre_restriction,
|
||||
max_job_workers=max_job_workers,
|
||||
core_secret=core_secret,
|
||||
jobservice_secret=jobservice_secret,
|
||||
token_expiration=token_expiration,
|
||||
admiral_url=admiral_url,
|
||||
with_notary=args.notary_mode,
|
||||
with_clair=args.clair_mode,
|
||||
clair_db_password=clair_db_password,
|
||||
clair_db_host=clair_db_host,
|
||||
clair_db_port=clair_db_port,
|
||||
clair_db_username=clair_db_username,
|
||||
clair_db=clair_db,
|
||||
uaa_endpoint=uaa_endpoint,
|
||||
uaa_clientid=uaa_clientid,
|
||||
uaa_clientsecret=uaa_clientsecret,
|
||||
uaa_verify_cert=uaa_verify_cert,
|
||||
storage_provider_name=storage_provider_name,
|
||||
registry_url=registry_url,
|
||||
token_service_url=token_service_url,
|
||||
jobservice_url=jobservice_url,
|
||||
clair_url=clair_url,
|
||||
notary_url=notary_url,
|
||||
reload_key=reload_key,
|
||||
skip_reload_env_pattern=skip_reload_env_pattern,
|
||||
chart_repository_url=chart_repository_url,
|
||||
registry_controller_url = registry_controller_url,
|
||||
with_chartmuseum=args.chart_mode
|
||||
)
|
||||
|
||||
# set cache for chart repo server
|
||||
# default set 'memory' mode, if redis is configured then set to 'redis'
|
||||
chart_cache_driver = "memory"
|
||||
if len(redis_host) > 0:
|
||||
chart_cache_driver = "redis"
|
||||
|
||||
render(os.path.join(templates_dir, "core", "env"),
|
||||
core_conf_env,
|
||||
core_secret=core_secret,
|
||||
jobservice_secret=jobservice_secret,
|
||||
redis_host=redis_host,
|
||||
redis_port=redis_port,
|
||||
redis_password=redis_password,
|
||||
chart_cache_driver = chart_cache_driver,
|
||||
redis_url_reg = redis_url_reg)
|
||||
|
||||
registry_config_file = "config.yml"
|
||||
if storage_provider_name == "filesystem":
|
||||
if not storage_provider_config:
|
||||
storage_provider_config = "rootdirectory: /storage"
|
||||
elif "rootdirectory:" not in storage_provider_config:
|
||||
storage_provider_config = "rootdirectory: /storage" + "," + storage_provider_config
|
||||
# generate storage configuration section in yaml format
|
||||
storage_provider_conf_list = [storage_provider_name + ':']
|
||||
for c in storage_provider_config.split(","):
|
||||
kvs = c.split(": ")
|
||||
if len(kvs) == 2:
|
||||
if kvs[0].strip() == "keyfile":
|
||||
srcKeyFile = kvs[1].strip()
|
||||
if os.path.isfile(srcKeyFile):
|
||||
shutil.copyfile(srcKeyFile, os.path.join(registry_config_dir, "gcs.key"))
|
||||
storage_provider_conf_list.append("keyfile: %s" % "/etc/registry/gcs.key")
|
||||
continue
|
||||
storage_provider_conf_list.append(c.strip())
|
||||
storage_provider_info = ('\n' + ' ' * 4).join(storage_provider_conf_list)
|
||||
render(os.path.join(templates_dir, "registry", registry_config_file),
|
||||
registry_conf,
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID,
|
||||
storage_provider_info=storage_provider_info,
|
||||
public_url=public_url,
|
||||
core_url=core_url,
|
||||
redis_host=redis_host,
|
||||
redis_port=redis_port,
|
||||
redis_password=redis_password,
|
||||
redis_db_index_reg=redis_db_index_reg)
|
||||
|
||||
render(os.path.join(templates_dir, "db", "env"),
|
||||
db_conf_env,
|
||||
db_password=db_password)
|
||||
|
||||
render(os.path.join(templates_dir, "jobservice", "env"),
|
||||
job_conf_env,
|
||||
core_secret=core_secret,
|
||||
jobservice_secret=jobservice_secret,
|
||||
core_url=core_url)
|
||||
|
||||
render(os.path.join(templates_dir, "jobservice", "config.yml"),
|
||||
jobservice_conf,
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID,
|
||||
max_job_workers=max_job_workers,
|
||||
redis_url=redis_url_js)
|
||||
|
||||
render(os.path.join(templates_dir, "log", "logrotate.conf"),
|
||||
log_rotate_config,
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID,
|
||||
log_rotate_count=log_rotate_count,
|
||||
log_rotate_size=log_rotate_size)
|
||||
|
||||
render(os.path.join(templates_dir, "registryctl", "env"),
|
||||
registryctl_conf_env,
|
||||
jobservice_secret=jobservice_secret,
|
||||
core_secret=core_secret)
|
||||
|
||||
shutil.copyfile(os.path.join(templates_dir, "core", "app.conf"), core_conf)
|
||||
shutil.copyfile(os.path.join(templates_dir, "registryctl", "config.yml"), registryctl_conf_yml)
|
||||
print("Generated configuration file: %s" % core_conf)
|
||||
|
||||
if auth_mode == "uaa_auth":
|
||||
if os.path.isfile(uaa_ca_cert):
|
||||
if not os.path.isdir(core_cert_dir):
|
||||
os.makedirs(core_cert_dir)
|
||||
core_uaa_ca = os.path.join(core_cert_dir, "uaa_ca.pem")
|
||||
print("Copying UAA CA cert to %s" % core_uaa_ca)
|
||||
shutil.copyfile(uaa_ca_cert, core_uaa_ca)
|
||||
else:
|
||||
print("Can not find UAA CA cert: %s, skip" % uaa_ca_cert)
|
||||
|
||||
|
||||
def validate_crt_subj(dirty_subj):
|
||||
subj_list = [item for item in dirty_subj.strip().split("/") \
|
||||
if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0]
|
||||
return "/" + "/".join(subj_list)
|
||||
|
||||
FNULL = open(os.devnull, 'w')
|
||||
|
||||
from functools import wraps
|
||||
def stat_decorator(func):
|
||||
@wraps(func)
|
||||
def check_wrapper(*args, **kw):
|
||||
stat = func(*args, **kw)
|
||||
message = "Generated certificate, key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path']) \
|
||||
if stat == 0 else "Fail to generate key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path'])
|
||||
print(message)
|
||||
if stat != 0:
|
||||
sys.exit(1)
|
||||
return check_wrapper
|

@stat_decorator
def create_root_cert(subj, key_path="./k.key", cert_path="./cert.crt"):
    rc = subprocess.call(["openssl", "genrsa", "-out", key_path, "4096"], stdout=FNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["openssl", "req", "-new", "-x509", "-key", key_path,
                            "-out", cert_path, "-days", "3650", "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)

@stat_decorator
def create_cert(subj, ca_key, ca_cert, key_path="./k.key", cert_path="./cert.crt"):
    cert_dir = os.path.dirname(cert_path)
    csr_path = os.path.join(cert_dir, "tmp.csr")
    rc = subprocess.call(["openssl", "req", "-newkey", "rsa:4096", "-nodes", "-sha256", "-keyout", key_path,
                          "-out", csr_path, "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["openssl", "x509", "-req", "-days", "3650", "-in", csr_path, "-CA",
                            ca_cert, "-CAkey", ca_key, "-CAcreateserial", "-out", cert_path], stdout=FNULL, stderr=subprocess.STDOUT)
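# create_cert() issues a leaf certificate in two steps: generate a fresh RSA key
# and CSR, then sign the CSR with the supplied CA key/cert for 3650 days.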

def openssl_installed():
    # use subprocess.call (not check_call) so a missing openssl binary does not
    # raise and the fallback branch below can run
    shell_stat = subprocess.call(["which", "openssl"], stdout=FNULL, stderr=subprocess.STDOUT)
    if shell_stat != 0:
        print("Cannot find openssl installed on this computer\nUse default SSL certificate file")
        return False
    return True


if customize_crt == 'on' and openssl_installed():
    empty_subj = "/"
    private_key_pem = os.path.join(config_dir, "core", "private_key.pem")
    root_crt = os.path.join(config_dir, "registry", "root.crt")
    create_root_cert(empty_subj, key_path=private_key_pem, cert_path=root_crt)
    mark_file(private_key_pem)
    mark_file(root_crt)
else:
    print("Copied configuration file: %s" % os.path.join(core_config_dir, "private_key.pem"))
    shutil.copyfile(os.path.join(templates_dir, "core", "private_key.pem"), os.path.join(core_config_dir, "private_key.pem"))
    print("Copied configuration file: %s" % os.path.join(registry_config_dir, "root.crt"))
    shutil.copyfile(os.path.join(templates_dir, "registry", "root.crt"), os.path.join(registry_config_dir, "root.crt"))
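# private_key.pem is placed under core/ and root.crt under registry/; presumably
# they form the token signing/verification key pair, with mark_file() (defined
# earlier in this script) tightening ownership and permissions on both files.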

if len(registry_custom_ca_bundle_path) > 0 and os.path.isfile(registry_custom_ca_bundle_path):
    shutil.copyfile(registry_custom_ca_bundle_path, os.path.join(config_dir, "custom-ca-bundle.crt"))
    print("Copied custom ca bundle: %s" % os.path.join(config_dir, "custom-ca-bundle.crt"))

if args.notary_mode:
    notary_config_dir = prep_conf_dir(config_dir, "notary")
    notary_temp_dir = os.path.join(templates_dir, "notary")
    print("Copying sql file for notary DB")
    # if os.path.exists(os.path.join(notary_config_dir, "postgresql-initdb.d")):
    #     shutil.rmtree(os.path.join(notary_config_dir, "postgresql-initdb.d"))
    # shutil.copytree(os.path.join(notary_temp_dir, "postgresql-initdb.d"), os.path.join(notary_config_dir, "postgresql-initdb.d"))
    if customize_crt == 'on' and openssl_installed():
        try:
            temp_cert_dir = os.path.join(base_dir, "cert_tmp")
            if not os.path.exists(temp_cert_dir):
                os.makedirs(temp_cert_dir)
            ca_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=Self-signed by GoHarbor"
            cert_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=notarysigner"
            signer_ca_cert = os.path.join(temp_cert_dir, "notary-signer-ca.crt")
            signer_ca_key = os.path.join(temp_cert_dir, "notary-signer-ca.key")
            signer_cert_path = os.path.join(temp_cert_dir, "notary-signer.crt")
            signer_key_path = os.path.join(temp_cert_dir, "notary-signer.key")
            create_root_cert(ca_subj, key_path=signer_ca_key, cert_path=signer_ca_cert)
            create_cert(cert_subj, signer_ca_key, signer_ca_cert, key_path=signer_key_path, cert_path=signer_cert_path)
            print("Copying certs for notary signer")
            shutil.copy2(signer_cert_path, notary_config_dir)
            shutil.copy2(signer_key_path, notary_config_dir)
            shutil.copy2(signer_ca_cert, notary_config_dir)
        finally:
            srl_tmp = os.path.join(os.getcwd(), ".srl")
            if os.path.isfile(srl_tmp):
                os.remove(srl_tmp)
            if os.path.isdir(temp_cert_dir):
                shutil.rmtree(temp_cert_dir, True)
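        # openssl's -CAcreateserial can leave a serial-number (.srl) file behind in
        # the working directory; the finally block removes it along with the
        # temporary cert directory.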
    else:
        print("Copying certs for notary signer")
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer.crt"), notary_config_dir)
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer.key"), notary_config_dir)
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer-ca.crt"), notary_config_dir)
    shutil.copy2(os.path.join(registry_config_dir, "root.crt"), notary_config_dir)
    mark_file(os.path.join(notary_config_dir, "notary-signer.crt"))
    mark_file(os.path.join(notary_config_dir, "notary-signer.key"))
    mark_file(os.path.join(notary_config_dir, "notary-signer-ca.crt"))
    mark_file(os.path.join(notary_config_dir, "root.crt"))
    print("Copying notary signer configuration file")
    render(os.path.join(notary_temp_dir, "signer-config.postgres.json"),
        os.path.join(notary_config_dir, "signer-config.postgres.json"),
        uid=DEFAULT_UID,
        gid=DEFAULT_GID)

    render(os.path.join(notary_temp_dir, "server-config.postgres.json"),
        os.path.join(notary_config_dir, "server-config.postgres.json"),
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        token_endpoint=public_url)
    print("Copying nginx configuration file for notary")
    shutil.copy2(os.path.join(templates_dir, "nginx", "notary.upstream.conf"), nginx_conf_d)
    render(os.path.join(templates_dir, "nginx", "notary.server.conf"),
        os.path.join(nginx_conf_d, "notary.server.conf"),
        ssl_cert=os.path.join("/etc/nginx/cert", os.path.basename(target_cert_path)),
        ssl_cert_key=os.path.join("/etc/nginx/cert", os.path.basename(target_cert_key_path)))

    default_alias = get_alias(secretkey_path)
    render(os.path.join(notary_temp_dir, "signer_env"), os.path.join(notary_config_dir, "signer_env"), alias=default_alias)
    shutil.copy2(os.path.join(notary_temp_dir, "server_env"), notary_config_dir)

if args.clair_mode:
    clair_temp_dir = os.path.join(templates_dir, "clair")
    clair_config_dir = prep_conf_dir(config_dir, "clair")
    if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
        print("Copying offline data file for clair DB")
        shutil.rmtree(os.path.join(clair_config_dir, "postgresql-init.d"))
    shutil.copytree(os.path.join(clair_temp_dir, "postgresql-init.d"), os.path.join(clair_config_dir, "postgresql-init.d"))
    postgres_env = os.path.join(clair_config_dir, "postgres_env")
    render(os.path.join(clair_temp_dir, "postgres_env"), postgres_env, password=clair_db_password)
    clair_conf = os.path.join(clair_config_dir, "config.yaml")
    render(os.path.join(clair_temp_dir, "config.yaml"),
        clair_conf,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        password=clair_db_password,
        username=clair_db_username,
        host=clair_db_host,
        port=clair_db_port,
        dbname=clair_db,
        interval=clair_updaters_interval)

    # config http proxy for Clair
    http_proxy = rcp.get("configuration", "http_proxy").strip()
    https_proxy = rcp.get("configuration", "https_proxy").strip()
    no_proxy = rcp.get("configuration", "no_proxy").strip()
    clair_env = os.path.join(clair_config_dir, "clair_env")
    render(os.path.join(clair_temp_dir, "clair_env"), clair_env,
        http_proxy=http_proxy,
        https_proxy=https_proxy,
        no_proxy=no_proxy)
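    # The proxy values are read from the [configuration] section of the config file
    # parsed into rcp earlier in the script and rendered into Clair's env file.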

# config chart repository
if args.chart_mode:
    chartm_temp_dir = os.path.join(templates_dir, "chartserver")
    chartm_config_dir = os.path.join(config_dir, "chartserver")
    chartm_env = os.path.join(config_dir, "chartserver", "env")

    if not os.path.isdir(chartm_config_dir):
        print("Create config folder: %s" % chartm_config_dir)
        os.makedirs(chartm_config_dir)

    # process redis info
    cache_store = "redis"
    cache_redis_password = redis_password
    cache_redis_addr = redis_host + ":" + redis_port
    cache_redis_db_index = redis_db_index_chart

    # process storage info
    # default to the local filesystem
    storage_driver = "local"
    # storage provider configurations
    # note: the values of the specified keys are not validated here
    # convert the configs to a config map
    storage_provider_configs = storage_provider_config.split(",")
    storage_provider_config_map = {}
    storage_provider_config_options = []

    for k_v in storage_provider_configs:
        if len(k_v) > 0:
            kvs = k_v.split(": ")  # split on ": " so ":" inside values (e.g. URLs) is preserved
            if len(kvs) == 2:
                # key must not be empty
                if kvs[0].strip() != "":
                    storage_provider_config_map[kvs[0].strip()] = kvs[1].strip()
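    # Illustrative example (not from the real config): a storage_provider_config
    # value such as "region: us-west-1,bucket: harbor-charts" becomes
    # {"region": "us-west-1", "bucket": "harbor-charts"} here.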

    if storage_provider_name == "s3":
        # aws s3 storage
        storage_driver = "amazon"
        storage_provider_config_options.append("STORAGE_AMAZON_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_ENDPOINT=%s" % storage_provider_config_map.get("regionendpoint", ""))
        storage_provider_config_options.append("AWS_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskey", ""))
        storage_provider_config_options.append("AWS_SECRET_ACCESS_KEY=%s" % storage_provider_config_map.get("secretkey", ""))
    elif storage_provider_name == "gcs":
        # google cloud storage
        storage_driver = "google"
        storage_provider_config_options.append("STORAGE_GOOGLE_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_GOOGLE_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))

        keyFileOnHost = storage_provider_config_map.get("keyfile", "")
        if os.path.isfile(keyFileOnHost):
            shutil.copyfile(keyFileOnHost, os.path.join(chartm_config_dir, "gcs.key"))
            targetKeyFile = "/etc/chartserver/gcs.key"
            storage_provider_config_options.append("GOOGLE_APPLICATION_CREDENTIALS=%s" % targetKeyFile)
    elif storage_provider_name == "azure":
        # azure storage
        storage_driver = "microsoft"
        storage_provider_config_options.append("STORAGE_MICROSOFT_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCOUNT=%s" % storage_provider_config_map.get("accountname", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCESS_KEY=%s" % storage_provider_config_map.get("accountkey", ""))
        storage_provider_config_options.append("STORAGE_MICROSOFT_PREFIX=/azure/harbor/charts")
    elif storage_provider_name == "swift":
        # openstack swift
        storage_driver = "openstack"
        storage_provider_config_options.append("STORAGE_OPENSTACK_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("OS_AUTH_URL=%s" % storage_provider_config_map.get("authurl", ""))
        storage_provider_config_options.append("OS_USERNAME=%s" % storage_provider_config_map.get("username", ""))
        storage_provider_config_options.append("OS_PASSWORD=%s" % storage_provider_config_map.get("password", ""))
        storage_provider_config_options.append("OS_PROJECT_ID=%s" % storage_provider_config_map.get("tenantid", ""))
        storage_provider_config_options.append("OS_PROJECT_NAME=%s" % storage_provider_config_map.get("tenant", ""))
        storage_provider_config_options.append("OS_DOMAIN_ID=%s" % storage_provider_config_map.get("domainid", ""))
        storage_provider_config_options.append("OS_DOMAIN_NAME=%s" % storage_provider_config_map.get("domain", ""))
    elif storage_provider_name == "oss":
        # aliyun OSS
        storage_driver = "alibaba"
        storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % storage_provider_config_map.get("endpoint", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskeyid", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % storage_provider_config_map.get("accesskeysecret", ""))
    else:
        # use the local filesystem
        storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")

    # generate the storage provider configuration
    all_storage_provider_configs = ('\n').join(storage_provider_config_options)

    render(os.path.join(chartm_temp_dir, "env"),
        chartm_env,
        cache_store=cache_store,
        cache_redis_addr=cache_redis_addr,
        cache_redis_password=cache_redis_password,
        cache_redis_db_index=cache_redis_db_index,
        core_secret=core_secret,
        storage_driver=storage_driver,
        all_storage_driver_configs=all_storage_provider_configs)
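    # With the default "local" backend, all_storage_driver_configs renders to the
    # single line "STORAGE_LOCAL_ROOTDIR=/chart_storage" built above.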


FNULL.close()
print("The configuration files are ready, please use docker-compose to start the service.")

docker run -it --rm -v ${host_make_path}:/harbor_make -v $data_path:/data -v $log_path:/var/log/harbor -v $secretkey_path:$secretkey_path goharbor/prepare:1.7.1 $@
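# The docker run line above appears to come from the new make/prepare wrapper script:
# it mounts the host make/ directory at /harbor_make (where the config file lives),
# plus the data, log and secret-key paths, and forwards the original CLI arguments
# ("$@") to the goharbor/prepare container.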

@@ -3,6 +3,7 @@
# These cert files are only for Harbor testing.
IP='10.4.142.42'
OPENSSLCNF=
DATA_VOL='/data'

for path in /etc/openssl/openssl.cnf /etc/ssl/openssl.cnf /usr/local/etc/openssl/openssl.cnf; do
if [[ -e ${path} ]]; then
@@ -30,6 +31,6 @@ openssl x509 -req -days 365 -in $IP.csr -CA harbor_ca.crt \
-CAkey harbor_ca.key -CAcreateserial -extfile extfile.cnf -out $IP.crt

# Copy to harbor default location
mkdir -p /data/cert
cp $IP.crt /data/cert/server.crt
cp $IP.key /data/cert/server.key
mkdir -p $DATA_VOL/cert
cp $IP.crt $DATA_VOL/cert/server.crt
cp $IP.key $DATA_VOL/cert/server.key

@@ -3,6 +3,7 @@
# These cert files are only for Harbor testing.
IP='127.0.0.1'
OPENSSLCNF=
DATA_VOL='/data'

for path in /etc/openssl/openssl.cnf /etc/ssl/openssl.cnf /usr/local/etc/openssl/openssl.cnf; do
if [[ -e ${path} ]]; then
@@ -30,6 +31,6 @@ openssl x509 -req -days 365 -in $IP.csr -CA harbor_ca.crt \
-CAkey harbor_ca.key -CAcreateserial -extfile extfile.cnf -out $IP.crt

# Copy to harbor default location
mkdir -p /data/cert
cp $IP.crt /data/cert/server.crt
cp $IP.key /data/cert/server.key
mkdir -p $DATA_VOL/cert
cp $IP.crt $DATA_VOL/cert/server.crt
cp $IP.key $DATA_VOL/cert/server.key
@@ -3,5 +3,5 @@ IP=`ip addr s eth0 |grep "inet "|awk '{print $2}' |awk -F "/" '{print $1}'`
PROTOCOL='https'

#echo $IP
sudo sed "s/reg.mydomain.com/$IP/" -i make/harbor.cfg
sudo sed "s/^ui_url_protocol = .*/ui_url_protocol = $PROTOCOL/g" -i make/harbor.cfg
sudo sed "s/reg.mydomain.com/$IP/" -i make/harbor.yml
sudo sed "s/^ui_url_protocol: .*/ui_url_protocol: $PROTOCOL/g" -i make/harbor.yml

@@ -25,7 +25,7 @@ sleep 2
sudo -E env "PATH=$PATH" make go_check
sudo ./tests/hostcfg.sh
sudo ./tests/generateCerts.sh
sudo ./make/prepare
sudo MAKEPATH=$(pwd)/make ./make/prepare
sudo mkdir -p "/data/redis"
sudo mkdir -p /etc/core/ca/ && sudo mv ./tests/ca.crt /etc/core/ca/
sudo mkdir -p /harbor && sudo mv ./VERSION /harbor/UIVERSION