#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals  # We require Python 2.6 or later
from string import Template
import random
import os
from fnmatch import fnmatch
import sys
import string
import argparse
import subprocess
import shutil
from io import open

if sys.version_info[:3][0] == 2:
    import ConfigParser as ConfigParser
    import StringIO as StringIO

if sys.version_info[:3][0] == 3:
    import configparser as ConfigParser
    import io as StringIO

DATA_VOL = "/data"
DEFAULT_UID = 10000
DEFAULT_GID = 10000

base_dir = os.path.dirname(__file__)
config_dir = os.path.join(base_dir, "common/config")
templates_dir = os.path.join(base_dir, "common/templates")

custom_nginx_location_file_pattern = 'harbor.https.*.conf'
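# Note: this pattern selects optional, user-provided nginx location snippets kept under
# common/templates/nginx/ext; it is switched to 'harbor.http.*.conf' further below when
# the UI protocol is plain HTTP, and matching files are copied into nginx's conf.d.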
def validate(conf, args):

    protocol = rcp.get("configuration", "ui_url_protocol")
    if protocol != "https" and args.notary_mode:
        raise Exception("Error: the protocol must be https when Harbor is deployed with Notary")
    if protocol == "https":
        if not rcp.has_option("configuration", "ssl_cert"):
            raise Exception("Error: The protocol is https but attribute ssl_cert is not set")
        cert_path = rcp.get("configuration", "ssl_cert")
        if not os.path.isfile(cert_path):
            raise Exception("Error: The path for certificate: %s is invalid" % cert_path)
        if not rcp.has_option("configuration", "ssl_cert_key"):
            raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
        cert_key_path = rcp.get("configuration", "ssl_cert_key")
        if not os.path.isfile(cert_key_path):
            raise Exception("Error: The path for certificate key: %s is invalid" % cert_key_path)
    project_creation = rcp.get("configuration", "project_creation_restriction")

    if project_creation != "everyone" and project_creation != "adminonly":
        raise Exception("Error: invalid value for project_creation_restriction: %s" % project_creation)

    valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
    storage_provider_name = rcp.get("configuration", "registry_storage_provider_name").strip()
    if storage_provider_name not in valid_storage_drivers:
        raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (storage_provider_name, ",".join(valid_storage_drivers)))

    storage_provider_config = rcp.get("configuration", "registry_storage_provider_config").strip()
    if storage_provider_name != "filesystem":
        if storage_provider_config == "":
            raise Exception("Error: no provider configurations are provided for provider %s" % storage_provider_name)

    redis_host = rcp.get("configuration", "redis_host")
    if redis_host is None or len(redis_host) < 1:
        raise Exception("Error: redis_host in harbor.cfg needs to point to an endpoint of Redis server or cluster.")

    redis_port = rcp.get("configuration", "redis_port")
    if len(redis_port) < 1:
        raise Exception("Error: redis_port in harbor.cfg needs to point to the port of Redis server or cluster.")

    redis_db_index = rcp.get("configuration", "redis_db_index").strip()
    if len(redis_db_index.split(",")) != 3:
        raise Exception("Error: invalid value for redis_db_index: %s. Please set it as 1,2,3" % redis_db_index)

# To meet security requirements:
# by default this changes the file mode to 0600 and makes 10000:10000 the owner of the file
def mark_file(path, mode=0o600, uid=DEFAULT_UID, gid=DEFAULT_GID):
    if mode > 0:
        os.chmod(path, mode)
    if uid > 0 and gid > 0:
        os.chown(path, uid, gid)

def get_secret_key(path):
    secret_key = _get_secret(path, "secretkey")
    if len(secret_key) != 16:
        raise Exception("secret key's length has to be 16 chars, current length: %d" % len(secret_key))
    return secret_key

def get_alias(path):
    alias = _get_secret(path, "defaultalias", length=8)
    return alias

def _get_secret(folder, filename, length=16):
    key_file = os.path.join(folder, filename)
    if os.path.isfile(key_file):
        with open(key_file, 'r') as f:
            key = f.read()
            print("loaded secret from file: %s" % key_file)
        mark_file(key_file)
        return key
    if not os.path.isdir(folder):
        os.makedirs(folder)
    key = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(length))
    with open(key_file, 'w') as f:
        f.write(key)
        print("Generated and saved secret to file: %s" % key_file)
    mark_file(key_file)
    return key

def prep_conf_dir(root, *name):
    absolute_path = os.path.join(root, *name)
    if not os.path.exists(absolute_path):
        os.makedirs(absolute_path)
    return absolute_path

def render(src, dest, mode=0o640, uid=0, gid=0, **kw):
    t = Template(open(src, 'r').read())
    with open(dest, 'w') as f:
        f.write(t.substitute(**kw))
    mark_file(dest, mode, uid, gid)
    print("Generated configuration file: %s" % dest)

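# Illustrative note: render() relies on string.Template, so placeholders in the template
# files use the $name form. A minimal sketch (hypothetical template content, not a real
# Harbor template):
#   Template("hostname=$hostname").substitute(hostname="harbor.example.com")
#   -> "hostname=harbor.example.com"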
def delfile(src):
    if os.path.isfile(src):
        try:
            os.remove(src)
            print("Clearing the configuration file: %s" % src)
        except:
            pass
    elif os.path.isdir(src):
        for item in os.listdir(src):
            itemsrc = os.path.join(src, item)
            delfile(itemsrc)

parser = argparse.ArgumentParser()
parser.add_argument('--conf', dest='cfgfile', default=base_dir+'/harbor.cfg', type=str, help="the path of the Harbor configuration file")
parser.add_argument('--with-notary', dest='notary_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with notary")
parser.add_argument('--with-clair', dest='clair_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with clair")
parser.add_argument('--with-chartmuseum', dest='chart_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with chart repository support")
args = parser.parse_args()

delfile(config_dir)
# Read configurations
conf = StringIO.StringIO()
conf.write("[configuration]\n")
conf.write(open(args.cfgfile).read())
conf.seek(0, os.SEEK_SET)
rcp = ConfigParser.RawConfigParser()
rcp.readfp(conf)
validate(rcp, args)

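# All of the settings below come from the single [configuration] section that was
# prepended to the harbor.cfg contents above, so rcp.get("configuration", ...) is the
# only section ever queried.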
reload_config = rcp.get("configuration", "reload_config") if rcp.has_option(
    "configuration", "reload_config") else "false"
hostname = rcp.get("configuration", "hostname")
protocol = rcp.get("configuration", "ui_url_protocol")
public_url = protocol + "://" + hostname
email_identity = rcp.get("configuration", "email_identity")
email_host = rcp.get("configuration", "email_server")
email_port = rcp.get("configuration", "email_server_port")
email_usr = rcp.get("configuration", "email_username")
email_pwd = rcp.get("configuration", "email_password")
email_from = rcp.get("configuration", "email_from")
email_ssl = rcp.get("configuration", "email_ssl")
email_insecure = rcp.get("configuration", "email_insecure")
harbor_admin_password = rcp.get("configuration", "harbor_admin_password")
auth_mode = rcp.get("configuration", "auth_mode")
ldap_url = rcp.get("configuration", "ldap_url")
# these two options are either both set or unset
if rcp.has_option("configuration", "ldap_searchdn"):
    ldap_searchdn = rcp.get("configuration", "ldap_searchdn")
    ldap_search_pwd = rcp.get("configuration", "ldap_search_pwd")
else:
    ldap_searchdn = ""
    ldap_search_pwd = ""
ldap_basedn = rcp.get("configuration", "ldap_basedn")
# ldap_filter is null by default
if rcp.has_option("configuration", "ldap_filter"):
    ldap_filter = rcp.get("configuration", "ldap_filter")
else:
    ldap_filter = ""
ldap_uid = rcp.get("configuration", "ldap_uid")
ldap_scope = rcp.get("configuration", "ldap_scope")
ldap_timeout = rcp.get("configuration", "ldap_timeout")
ldap_verify_cert = rcp.get("configuration", "ldap_verify_cert")
ldap_group_basedn = rcp.get("configuration", "ldap_group_basedn")
ldap_group_filter = rcp.get("configuration", "ldap_group_filter")
ldap_group_gid = rcp.get("configuration", "ldap_group_gid")
ldap_group_scope = rcp.get("configuration", "ldap_group_scope")
db_password = rcp.get("configuration", "db_password")
db_host = rcp.get("configuration", "db_host")
db_user = rcp.get("configuration", "db_user")
db_port = rcp.get("configuration", "db_port")
self_registration = rcp.get("configuration", "self_registration")
if protocol == "https":
    cert_path = rcp.get("configuration", "ssl_cert")
    cert_key_path = rcp.get("configuration", "ssl_cert_key")
customize_crt = rcp.get("configuration", "customize_crt")
max_job_workers = rcp.get("configuration", "max_job_workers")
token_expiration = rcp.get("configuration", "token_expiration")
proj_cre_restriction = rcp.get("configuration", "project_creation_restriction")
secretkey_path = rcp.get("configuration", "secretkey_path")
if rcp.has_option("configuration", "admiral_url"):
    admiral_url = rcp.get("configuration", "admiral_url")
else:
    admiral_url = ""
clair_db_password = rcp.get("configuration", "clair_db_password")
clair_db_host = rcp.get("configuration", "clair_db_host")
clair_db_port = rcp.get("configuration", "clair_db_port")
clair_db_username = rcp.get("configuration", "clair_db_username")
clair_db = rcp.get("configuration", "clair_db")
clair_updaters_interval = rcp.get("configuration", "clair_updaters_interval")

uaa_endpoint = rcp.get("configuration", "uaa_endpoint")
uaa_clientid = rcp.get("configuration", "uaa_clientid")
uaa_clientsecret = rcp.get("configuration", "uaa_clientsecret")
uaa_verify_cert = rcp.get("configuration", "uaa_verify_cert")
uaa_ca_cert = rcp.get("configuration", "uaa_ca_cert")

secret_key = get_secret_key(secretkey_path)
log_rotate_count = rcp.get("configuration", "log_rotate_count")
log_rotate_size = rcp.get("configuration", "log_rotate_size")

redis_host = rcp.get("configuration", "redis_host")
redis_port = rcp.get("configuration", "redis_port")
redis_password = rcp.get("configuration", "redis_password")
redis_db_index = rcp.get("configuration", "redis_db_index")

db_indexs = redis_db_index.split(',')
redis_db_index_reg = db_indexs[0]
redis_db_index_js = db_indexs[1]
redis_db_index_chart = db_indexs[2]

# redis://[arbitrary_username:password@]ipaddress:port/database_index
redis_url_js = ''
redis_url_reg = ''
if len(redis_password) > 0:
    redis_url_js = "redis://anonymous:%s@%s:%s/%s" % (redis_password, redis_host, redis_port, redis_db_index_js)
    redis_url_reg = "redis://anonymous:%s@%s:%s/%s" % (redis_password, redis_host, redis_port, redis_db_index_reg)
else:
    redis_url_js = "redis://%s:%s/%s" % (redis_host, redis_port, redis_db_index_js)
    redis_url_reg = "redis://%s:%s/%s" % (redis_host, redis_port, redis_db_index_reg)

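# For example (illustrative values only): with redis_host=redis, redis_port=6379, an empty
# redis_password and redis_db_index=1,2,3, the URLs built above become
#   redis_url_reg = "redis://redis:6379/1"
#   redis_url_js  = "redis://redis:6379/2"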
if rcp.has_option("configuration", "skip_reload_env_pattern"):
    skip_reload_env_pattern = rcp.get("configuration", "skip_reload_env_pattern")
else:
    skip_reload_env_pattern = "$^"
storage_provider_name = rcp.get("configuration", "registry_storage_provider_name").strip()
storage_provider_config = rcp.get("configuration", "registry_storage_provider_config").strip()
# yaml requires 1 or more spaces between the key and value
storage_provider_config = storage_provider_config.replace(":", ": ", 1)
registry_custom_ca_bundle_path = rcp.get("configuration", "registry_custom_ca_bundle").strip()
core_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
jobservice_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))

adminserver_config_dir = os.path.join(config_dir, "adminserver")
if not os.path.exists(adminserver_config_dir):
    os.makedirs(os.path.join(config_dir, "adminserver"))

core_config_dir = prep_conf_dir(config_dir, "core")
core_certificates_dir = prep_conf_dir(core_config_dir, "certificates")
db_config_dir = prep_conf_dir(config_dir, "db")
job_config_dir = prep_conf_dir(config_dir, "jobservice")
registry_config_dir = prep_conf_dir(config_dir, "registry")
registryctl_config_dir = prep_conf_dir(config_dir, "registryctl")
nginx_config_dir = prep_conf_dir(config_dir, "nginx")
nginx_conf_d = prep_conf_dir(nginx_config_dir, "conf.d")
log_config_dir = prep_conf_dir(config_dir, "log")

adminserver_conf_env = os.path.join(config_dir, "adminserver", "env")
core_conf_env = os.path.join(config_dir, "core", "env")
core_conf = os.path.join(config_dir, "core", "app.conf")
core_cert_dir = os.path.join(config_dir, "core", "certificates")
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
registry_conf = os.path.join(config_dir, "registry", "config.yml")
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")
registryctl_conf_yml = os.path.join(config_dir, "registryctl", "config.yml")
db_conf_env = os.path.join(config_dir, "db", "env")
job_conf_env = os.path.join(config_dir, "jobservice", "env")
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
cert_dir = os.path.join(config_dir, "nginx", "cert")
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
adminserver_url = "http://adminserver:8080"
registry_url = "http://registry:5000"
registry_controller_url = "http://registryctl:8080"
core_url = "http://core:8080"
token_service_url = "http://core:8080/service/token"

jobservice_url = "http://jobservice:8080"
clair_url = "http://clair:6060"
notary_url = "http://notary-server:4443"
chart_repository_url = "http://chartmuseum:9999"

if len(admiral_url) != 0 and admiral_url != "NA":
    # VIC overwrites the data volume path, which by default should be the same as the value of secretkey_path
    DATA_VOL = secretkey_path
JOB_LOG_DIR = os.path.join(DATA_VOL, "job_logs")
if not os.path.exists(JOB_LOG_DIR):
    os.makedirs(JOB_LOG_DIR)
mark_file(JOB_LOG_DIR, mode=0o755)

if protocol == "https":
|
|
target_cert_path = os.path.join(cert_dir, os.path.basename(cert_path))
|
|
if not os.path.exists(cert_dir):
|
|
os.makedirs(cert_dir)
|
|
shutil.copy2(cert_path,target_cert_path)
|
|
target_cert_key_path = os.path.join(cert_dir, os.path.basename(cert_key_path))
|
|
shutil.copy2(cert_key_path,target_cert_key_path)
|
|
render(os.path.join(templates_dir, "nginx", "nginx.https.conf"),
|
|
nginx_conf,
|
|
ssl_cert = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_path)),
|
|
ssl_cert_key = os.path.join("/etc/nginx/cert", os.path.basename(target_cert_key_path)))
|
|
else:
|
|
render(os.path.join(templates_dir, "nginx", "nginx.http.conf"), nginx_conf)
|
|
custom_nginx_location_file_pattern = 'harbor.http.*.conf'
|
|
|
|
def add_additional_location_config(src, dst):
    """
    These conf files are for users who want to add customized locations to the Harbor proxy
    :params src: source of the file
    :params dst: destination file path
    """
    if not os.path.isfile(src):
        return
    print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
    shutil.copy2(src, dst)
    mark_file(dst)

nginx_template_ext_dir = os.path.join(templates_dir, 'nginx', 'ext')
if os.path.exists(nginx_template_ext_dir):
    for filename in [fname for fname in os.listdir(nginx_template_ext_dir) if fnmatch(fname, custom_nginx_location_file_pattern)]:
        add_additional_location_config(
            os.path.join(nginx_template_ext_dir, filename),
            os.path.join(nginx_conf_d, filename))

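# Illustrative note: to add a customized location to the Harbor proxy, a user could drop a
# file such as common/templates/nginx/ext/harbor.https.mylocation.conf (hypothetical name)
# containing an nginx "location" block; any file matching the pattern above is copied into
# nginx's conf.d directory by the loop above.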
# Use reload_key to avoid reloading the config after Harbor restarts
reload_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) if reload_config == "true" else ""

ldap_group_admin_dn = rcp.get("configuration", "ldap_group_admin_dn") if rcp.has_option("configuration", "ldap_group_admin_dn") else ""

render(os.path.join(templates_dir, "adminserver", "env"),
|
|
adminserver_conf_env,
|
|
reload_config=reload_config,
|
|
public_url=public_url,
|
|
core_url=core_url,
|
|
auth_mode=auth_mode,
|
|
self_registration=self_registration,
|
|
ldap_url=ldap_url,
|
|
ldap_searchdn =ldap_searchdn,
|
|
ldap_search_pwd =ldap_search_pwd,
|
|
ldap_basedn=ldap_basedn,
|
|
ldap_filter=ldap_filter,
|
|
ldap_uid=ldap_uid,
|
|
ldap_scope=ldap_scope,
|
|
ldap_verify_cert=ldap_verify_cert,
|
|
ldap_timeout=ldap_timeout,
|
|
ldap_group_basedn=ldap_group_basedn,
|
|
ldap_group_filter=ldap_group_filter,
|
|
ldap_group_gid=ldap_group_gid,
|
|
ldap_group_scope=ldap_group_scope,
|
|
ldap_group_admin_dn=ldap_group_admin_dn,
|
|
db_password=db_password,
|
|
db_host=db_host,
|
|
db_user=db_user,
|
|
db_port=db_port,
|
|
email_host=email_host,
|
|
email_port=email_port,
|
|
email_usr=email_usr,
|
|
email_pwd=email_pwd,
|
|
email_ssl=email_ssl,
|
|
email_insecure=email_insecure,
|
|
email_from=email_from,
|
|
email_identity=email_identity,
|
|
harbor_admin_password=harbor_admin_password,
|
|
project_creation_restriction=proj_cre_restriction,
|
|
max_job_workers=max_job_workers,
|
|
core_secret=core_secret,
|
|
jobservice_secret=jobservice_secret,
|
|
token_expiration=token_expiration,
|
|
admiral_url=admiral_url,
|
|
with_notary=args.notary_mode,
|
|
with_clair=args.clair_mode,
|
|
clair_db_password=clair_db_password,
|
|
clair_db_host=clair_db_host,
|
|
clair_db_port=clair_db_port,
|
|
clair_db_username=clair_db_username,
|
|
clair_db=clair_db,
|
|
uaa_endpoint=uaa_endpoint,
|
|
uaa_clientid=uaa_clientid,
|
|
uaa_clientsecret=uaa_clientsecret,
|
|
uaa_verify_cert=uaa_verify_cert,
|
|
storage_provider_name=storage_provider_name,
|
|
registry_url=registry_url,
|
|
token_service_url=token_service_url,
|
|
jobservice_url=jobservice_url,
|
|
clair_url=clair_url,
|
|
notary_url=notary_url,
|
|
reload_key=reload_key,
|
|
skip_reload_env_pattern=skip_reload_env_pattern,
|
|
chart_repository_url=chart_repository_url,
|
|
registry_controller_url = registry_controller_url,
|
|
with_chartmuseum=args.chart_mode
|
|
)
|
|
|
|
# set cache for chart repo server
# default to 'memory' mode; if redis is configured then set it to 'redis'
chart_cache_driver = "memory"
if len(redis_host) > 0:
    chart_cache_driver = "redis"

render(os.path.join(templates_dir, "core", "env"),
|
|
core_conf_env,
|
|
core_secret=core_secret,
|
|
jobservice_secret=jobservice_secret,
|
|
redis_host=redis_host,
|
|
redis_port=redis_port,
|
|
redis_password=redis_password,
|
|
adminserver_url = adminserver_url,
|
|
chart_cache_driver = chart_cache_driver,
|
|
redis_url_reg = redis_url_reg)
|
|
|
|
registry_config_file = "config.yml"
|
|
if storage_provider_name == "filesystem":
|
|
if not storage_provider_config:
|
|
storage_provider_config = "rootdirectory: /storage"
|
|
elif "rootdirectory:" not in storage_provider_config:
|
|
storage_provider_config = "rootdirectory: /storage" + "," + storage_provider_config
|
|
# generate storage configuration section in yaml format
|
|
storage_provider_conf_list = [storage_provider_name + ':']
|
|
for c in storage_provider_config.split(","):
|
|
kvs = c.split(": ")
|
|
if len(kvs) == 2:
|
|
if kvs[0].strip() == "keyfile":
|
|
srcKeyFile = kvs[1].strip()
|
|
if os.path.isfile(srcKeyFile):
|
|
shutil.copyfile(srcKeyFile, os.path.join(registry_config_dir, "gcs.key"))
|
|
storage_provider_conf_list.append("keyfile: %s" % "/etc/registry/gcs.key")
|
|
continue
|
|
storage_provider_conf_list.append(c.strip())
|
|
storage_provider_info = ('\n' + ' ' * 4).join(storage_provider_conf_list)
|
|
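# For example, with the default filesystem provider and no extra options, the
# storage_provider_info value built above expands to:
#   filesystem:
#       rootdirectory: /storage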
render(os.path.join(templates_dir, "registry", registry_config_file),
|
|
registry_conf,
|
|
uid=DEFAULT_UID,
|
|
gid=DEFAULT_GID,
|
|
storage_provider_info=storage_provider_info,
|
|
public_url=public_url,
|
|
core_url=core_url,
|
|
redis_host=redis_host,
|
|
redis_port=redis_port,
|
|
redis_password=redis_password,
|
|
redis_db_index_reg=redis_db_index_reg)
|
|
|
|
render(os.path.join(templates_dir, "db", "env"),
|
|
db_conf_env,
|
|
db_password=db_password)
|
|
|
|
render(os.path.join(templates_dir, "jobservice", "env"),
|
|
job_conf_env,
|
|
core_secret=core_secret,
|
|
jobservice_secret=jobservice_secret,
|
|
core_url=core_url)
|
|
|
|
render(os.path.join(templates_dir, "jobservice", "config.yml"),
|
|
jobservice_conf,
|
|
uid=DEFAULT_UID,
|
|
gid=DEFAULT_GID,
|
|
max_job_workers=max_job_workers,
|
|
redis_url=redis_url_js)
|
|
|
|
render(os.path.join(templates_dir, "log", "logrotate.conf"),
|
|
log_rotate_config,
|
|
uid=DEFAULT_UID,
|
|
gid=DEFAULT_GID,
|
|
log_rotate_count=log_rotate_count,
|
|
log_rotate_size=log_rotate_size)
|
|
|
|
render(os.path.join(templates_dir, "registryctl", "env"),
|
|
registryctl_conf_env,
|
|
jobservice_secret=jobservice_secret,
|
|
core_secret=core_secret)
|
|
|
|
shutil.copyfile(os.path.join(templates_dir, "core", "app.conf"), core_conf)
|
|
shutil.copyfile(os.path.join(templates_dir, "registryctl", "config.yml"), registryctl_conf_yml)
|
|
print("Generated configuration file: %s" % core_conf)
|
|
|
|
if auth_mode == "uaa_auth":
|
|
if os.path.isfile(uaa_ca_cert):
|
|
if not os.path.isdir(core_cert_dir):
|
|
os.makedirs(core_cert_dir)
|
|
core_uaa_ca = os.path.join(core_cert_dir, "uaa_ca.pem")
|
|
print("Copying UAA CA cert to %s" % core_uaa_ca)
|
|
shutil.copyfile(uaa_ca_cert, core_uaa_ca)
|
|
else:
|
|
print("Can not find UAA CA cert: %s, skip" % uaa_ca_cert)
|
|
|
|
|
|
def validate_crt_subj(dirty_subj):
    subj_list = [item for item in dirty_subj.strip().split("/")
                 if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0]
    return "/" + "/".join(subj_list)

FNULL = open(os.devnull, 'w')

from functools import wraps
def stat_decorator(func):
    @wraps(func)
    def check_wrapper(*args, **kw):
        stat = func(*args, **kw)
        message = "Generated certificate, key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path']) \
            if stat == 0 else "Failed to generate key file: %s, cert file: %s" % (kw['key_path'], kw['cert_path'])
        print(message)
        if stat != 0:
            sys.exit(1)
    return check_wrapper

@stat_decorator
def create_root_cert(subj, key_path="./k.key", cert_path="./cert.crt"):
    rc = subprocess.call(["openssl", "genrsa", "-out", key_path, "4096"], stdout=FNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["openssl", "req", "-new", "-x509", "-key", key_path,
                            "-out", cert_path, "-days", "3650", "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)

@stat_decorator
def create_cert(subj, ca_key, ca_cert, key_path="./k.key", cert_path="./cert.crt"):
    cert_dir = os.path.dirname(cert_path)
    csr_path = os.path.join(cert_dir, "tmp.csr")
    rc = subprocess.call(["openssl", "req", "-newkey", "rsa:4096", "-nodes", "-sha256", "-keyout", key_path,
                          "-out", csr_path, "-subj", subj], stdout=FNULL, stderr=subprocess.STDOUT)
    if rc != 0:
        return rc
    return subprocess.call(["openssl", "x509", "-req", "-days", "3650", "-in", csr_path, "-CA",
                            ca_cert, "-CAkey", ca_key, "-CAcreateserial", "-out", cert_path], stdout=FNULL, stderr=subprocess.STDOUT)

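# The helpers above shell out to openssl; the commands they run are equivalent to:
#   openssl genrsa -out <key> 4096
#   openssl req -new -x509 -key <key> -out <cert> -days 3650 -subj <subj>
# and, for a CA-signed certificate:
#   openssl req -newkey rsa:4096 -nodes -sha256 -keyout <key> -out tmp.csr -subj <subj>
#   openssl x509 -req -days 3650 -in tmp.csr -CA <ca_cert> -CAkey <ca_key> -CAcreateserial -out <cert>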
def openssl_installed():
    # use subprocess.call (not check_call) so a missing openssl returns a non-zero
    # status instead of raising, and the default certificate fallback below can run
    shell_stat = subprocess.call(["which", "openssl"], stdout=FNULL, stderr=subprocess.STDOUT)
    if shell_stat != 0:
        print("Cannot find openssl installed in this computer\nUse default SSL certificate file")
        return False
    return True


if customize_crt == 'on' and openssl_installed():
    shell_stat = subprocess.check_call(["which", "openssl"], stdout=FNULL, stderr=subprocess.STDOUT)
    empty_subj = "/"
    private_key_pem = os.path.join(config_dir, "core", "private_key.pem")
    root_crt = os.path.join(config_dir, "registry", "root.crt")
    create_root_cert(empty_subj, key_path=private_key_pem, cert_path=root_crt)
    mark_file(private_key_pem)
    mark_file(root_crt)
else:
    print("Copied configuration file: %s" % os.path.join(core_config_dir, "private_key.pem"))
    shutil.copyfile(os.path.join(templates_dir, "core", "private_key.pem"), os.path.join(core_config_dir, "private_key.pem"))
    print("Copied configuration file: %s" % os.path.join(registry_config_dir, "root.crt"))
    shutil.copyfile(os.path.join(templates_dir, "registry", "root.crt"), os.path.join(registry_config_dir, "root.crt"))

if len(registry_custom_ca_bundle_path) > 0 and os.path.isfile(registry_custom_ca_bundle_path):
    shutil.copyfile(registry_custom_ca_bundle_path, os.path.join(config_dir, "custom-ca-bundle.crt"))
    print("Copied custom ca bundle: %s" % os.path.join(config_dir, "custom-ca-bundle.crt"))

if args.notary_mode:
    notary_config_dir = prep_conf_dir(config_dir, "notary")
    notary_temp_dir = os.path.join(templates_dir, "notary")
    print("Copying sql file for notary DB")
    # if os.path.exists(os.path.join(notary_config_dir, "postgresql-initdb.d")):
    #     shutil.rmtree(os.path.join(notary_config_dir, "postgresql-initdb.d"))
    # shutil.copytree(os.path.join(notary_temp_dir, "postgresql-initdb.d"), os.path.join(notary_config_dir, "postgresql-initdb.d"))
    if customize_crt == 'on' and openssl_installed():
        try:
            temp_cert_dir = os.path.join(base_dir, "cert_tmp")
            if not os.path.exists(temp_cert_dir):
                os.makedirs(temp_cert_dir)
            ca_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=Self-signed by GoHarbor"
            cert_subj = "/C=US/ST=California/L=Palo Alto/O=GoHarbor/OU=Harbor/CN=notarysigner"
            signer_ca_cert = os.path.join(temp_cert_dir, "notary-signer-ca.crt")
            signer_ca_key = os.path.join(temp_cert_dir, "notary-signer-ca.key")
            signer_cert_path = os.path.join(temp_cert_dir, "notary-signer.crt")
            signer_key_path = os.path.join(temp_cert_dir, "notary-signer.key")
            create_root_cert(ca_subj, key_path=signer_ca_key, cert_path=signer_ca_cert)
            create_cert(cert_subj, signer_ca_key, signer_ca_cert, key_path=signer_key_path, cert_path=signer_cert_path)
            print("Copying certs for notary signer")
            shutil.copy2(signer_cert_path, notary_config_dir)
            shutil.copy2(signer_key_path, notary_config_dir)
            shutil.copy2(signer_ca_cert, notary_config_dir)
        finally:
            srl_tmp = os.path.join(os.getcwd(), ".srl")
            if os.path.isfile(srl_tmp):
                os.remove(srl_tmp)
            if os.path.isdir(temp_cert_dir):
                shutil.rmtree(temp_cert_dir, True)
    else:
        print("Copying certs for notary signer")
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer.crt"), notary_config_dir)
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer.key"), notary_config_dir)
        shutil.copy2(os.path.join(notary_temp_dir, "notary-signer-ca.crt"), notary_config_dir)
    shutil.copy2(os.path.join(registry_config_dir, "root.crt"), notary_config_dir)
    mark_file(os.path.join(notary_config_dir, "notary-signer.crt"))
    mark_file(os.path.join(notary_config_dir, "notary-signer.key"))
    mark_file(os.path.join(notary_config_dir, "notary-signer-ca.crt"))
    mark_file(os.path.join(notary_config_dir, "root.crt"))
    print("Copying notary signer configuration file")
    render(os.path.join(notary_temp_dir, "signer-config.postgres.json"),
           os.path.join(notary_config_dir, "signer-config.postgres.json"),
           uid=DEFAULT_UID,
           gid=DEFAULT_GID
           )

    render(os.path.join(notary_temp_dir, "server-config.postgres.json"),
           os.path.join(notary_config_dir, "server-config.postgres.json"),
           uid=DEFAULT_UID,
           gid=DEFAULT_GID,
           token_endpoint=public_url)
    print("Copying nginx configuration file for notary")
    shutil.copy2(os.path.join(templates_dir, "nginx", "notary.upstream.conf"), nginx_conf_d)
    render(os.path.join(templates_dir, "nginx", "notary.server.conf"),
           os.path.join(nginx_conf_d, "notary.server.conf"),
           ssl_cert=os.path.join("/etc/nginx/cert", os.path.basename(target_cert_path)),
           ssl_cert_key=os.path.join("/etc/nginx/cert", os.path.basename(target_cert_key_path)))

    default_alias = get_alias(secretkey_path)
    render(os.path.join(notary_temp_dir, "signer_env"), os.path.join(notary_config_dir, "signer_env"), alias=default_alias)
    shutil.copy2(os.path.join(notary_temp_dir, "server_env"), notary_config_dir)

if args.clair_mode:
    clair_temp_dir = os.path.join(templates_dir, "clair")
    clair_config_dir = prep_conf_dir(config_dir, "clair")
    if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
        print("Copying offline data file for clair DB")
        shutil.rmtree(os.path.join(clair_config_dir, "postgresql-init.d"))
    shutil.copytree(os.path.join(clair_temp_dir, "postgresql-init.d"), os.path.join(clair_config_dir, "postgresql-init.d"))
    postgres_env = os.path.join(clair_config_dir, "postgres_env")
    render(os.path.join(clair_temp_dir, "postgres_env"), postgres_env, password=clair_db_password)
    clair_conf = os.path.join(clair_config_dir, "config.yaml")
    render(os.path.join(clair_temp_dir, "config.yaml"),
           clair_conf,
           uid=DEFAULT_UID,
           gid=DEFAULT_GID,
           password=clair_db_password,
           username=clair_db_username,
           host=clair_db_host,
           port=clair_db_port,
           dbname=clair_db,
           interval=clair_updaters_interval)

    # config http proxy for Clair
    http_proxy = rcp.get("configuration", "http_proxy").strip()
    https_proxy = rcp.get("configuration", "https_proxy").strip()
    no_proxy = rcp.get("configuration", "no_proxy").strip()
    clair_env = os.path.join(clair_config_dir, "clair_env")
    render(os.path.join(clair_temp_dir, "clair_env"), clair_env,
           http_proxy=http_proxy,
           https_proxy=https_proxy,
           no_proxy=no_proxy)

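# The chart repository section below reuses the registry storage settings and maps the
# provider name to a chartserver storage driver: s3 -> amazon, gcs -> google,
# azure -> microsoft, swift -> openstack, oss -> alibaba; anything else falls back to
# local file-system storage under /chart_storage.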
# config chart repository
if args.chart_mode:
    chartm_temp_dir = os.path.join(templates_dir, "chartserver")
    chartm_config_dir = os.path.join(config_dir, "chartserver")
    chartm_env = os.path.join(config_dir, "chartserver", "env")

    if not os.path.isdir(chartm_config_dir):
        print("Create config folder: %s" % chartm_config_dir)
        os.makedirs(chartm_config_dir)

    # process redis info
    cache_store = "redis"
    cache_redis_password = redis_password
    cache_redis_addr = redis_host + ":" + redis_port
    cache_redis_db_index = redis_db_index_chart

    # process storage info
    # default to the local file system
    storage_driver = "local"
    # storage provider configurations
    # please be aware that we do not check the validity of the values for the specified keys
    # convert the configs to a config map
    storage_provider_configs = storage_provider_config.split(",")
    storage_provider_config_map = {}
    storage_provider_config_options = []

    for k_v in storage_provider_configs:
        if len(k_v) > 0:
            kvs = k_v.split(": ")  # split on ": " (with the space) to avoid breaking on ":" inside the value
            if len(kvs) == 2:
                # key must not be empty
                if kvs[0].strip() != "":
                    storage_provider_config_map[kvs[0].strip()] = kvs[1].strip()

    if storage_provider_name == "s3":
        # aws s3 storage
        storage_driver = "amazon"
        storage_provider_config_options.append("STORAGE_AMAZON_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("STORAGE_AMAZON_ENDPOINT=%s" % storage_provider_config_map.get("regionendpoint", ""))
        storage_provider_config_options.append("AWS_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskey", ""))
        storage_provider_config_options.append("AWS_SECRET_ACCESS_KEY=%s" % storage_provider_config_map.get("secretkey", ""))
    elif storage_provider_name == "gcs":
        # google cloud storage
        storage_driver = "google"
        storage_provider_config_options.append("STORAGE_GOOGLE_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_GOOGLE_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))

        keyFileOnHost = storage_provider_config_map.get("keyfile", "")
        if os.path.isfile(keyFileOnHost):
            shutil.copyfile(keyFileOnHost, os.path.join(chartm_config_dir, "gcs.key"))
            targetKeyFile = "/etc/chartserver/gcs.key"
            storage_provider_config_options.append("GOOGLE_APPLICATION_CREDENTIALS=%s" % targetKeyFile)
    elif storage_provider_name == "azure":
        # azure storage
        storage_driver = "microsoft"
        storage_provider_config_options.append("STORAGE_MICROSOFT_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCOUNT=%s" % storage_provider_config_map.get("accountname", ""))
        storage_provider_config_options.append("AZURE_STORAGE_ACCESS_KEY=%s" % storage_provider_config_map.get("accountkey", ""))
        storage_provider_config_options.append("STORAGE_MICROSOFT_PREFIX=/azure/harbor/charts")
    elif storage_provider_name == "swift":
        # openstack swift
        storage_driver = "openstack"
        storage_provider_config_options.append("STORAGE_OPENSTACK_CONTAINER=%s" % storage_provider_config_map.get("container", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_OPENSTACK_REGION=%s" % storage_provider_config_map.get("region", ""))
        storage_provider_config_options.append("OS_AUTH_URL=%s" % storage_provider_config_map.get("authurl", ""))
        storage_provider_config_options.append("OS_USERNAME=%s" % storage_provider_config_map.get("username", ""))
        storage_provider_config_options.append("OS_PASSWORD=%s" % storage_provider_config_map.get("password", ""))
        storage_provider_config_options.append("OS_PROJECT_ID=%s" % storage_provider_config_map.get("tenantid", ""))
        storage_provider_config_options.append("OS_PROJECT_NAME=%s" % storage_provider_config_map.get("tenant", ""))
        storage_provider_config_options.append("OS_DOMAIN_ID=%s" % storage_provider_config_map.get("domainid", ""))
        storage_provider_config_options.append("OS_DOMAIN_NAME=%s" % storage_provider_config_map.get("domain", ""))
    elif storage_provider_name == "oss":
        # aliyun OSS
        storage_driver = "alibaba"
        storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % storage_provider_config_map.get("bucket", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % storage_provider_config_map.get("rootdirectory", ""))
        storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % storage_provider_config_map.get("endpoint", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % storage_provider_config_map.get("accesskeyid", ""))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % storage_provider_config_map.get("accesskeysecret", ""))
    else:
        # use the local file system
        storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")

    # generate storage provider configuration
    all_storage_provider_configs = ('\n').join(storage_provider_config_options)

    render(os.path.join(chartm_temp_dir, "env"),
           chartm_env,
           cache_store=cache_store,
           cache_redis_addr=cache_redis_addr,
           cache_redis_password=cache_redis_password,
           cache_redis_db_index=cache_redis_db_index,
           core_secret=core_secret,
           storage_driver=storage_driver,
           all_storage_driver_configs=all_storage_provider_configs)


FNULL.close()
print("The configuration files are ready, please use docker-compose to start the service.")