harbor/make/photon/prepare/utils/chart.py


import os, shutil

from g import templates_dir, config_dir
from .jinja import render_jinja

chartm_temp_dir = os.path.join(templates_dir, "chartserver")
chartm_env_temp = os.path.join(chartm_temp_dir, "env.jinja")

chartm_config_dir = os.path.join(config_dir, "chartserver")
chartm_env = os.path.join(config_dir, "chartserver", "env")
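
# Paths: env.jinja is the chart server environment template shipped with the
# prepare tooling; chartm_env is where the rendered KEY=VALUE env file lands.
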
def prepare_chartmuseum(config_dict):
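    """Generate the ChartMuseum environment file from Harbor's config.

    Pulls the Redis cache settings and the storage provider settings out of
    config_dict and renders them into chartm_env via the env.jinja template.
    """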
    core_secret = config_dict['core_secret']
    redis_host = config_dict['redis_host']
    redis_port = config_dict['redis_port']
    redis_password = config_dict['redis_password']
    redis_db_index_chart = config_dict['redis_db_index_chart']
    storage_provider_name = config_dict['storage_provider_name']
    storage_provider_config_map = config_dict['storage_provider_config']

    if not os.path.isdir(chartm_config_dir):
        print("Create config folder: %s" % chartm_config_dir)
        os.makedirs(chartm_config_dir)

    # process redis info
    cache_store = "redis"
    cache_redis_password = redis_password
    cache_redis_addr = "{}:{}".format(redis_host, redis_port)
    cache_redis_db_index = redis_db_index_chart
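
    # These cache_* values are handed to env.jinja below so the chart server
    # uses Harbor's Redis (redis_host:redis_port, DB redis_db_index_chart)
    # as its cache store.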

    # process storage info
    # default to the local filesystem
    storage_driver = "local"
    # Storage provider configurations.
    # Note: the values of the specified keys are not validated here.
    # Convert the configs into a list of KEY=VALUE options.
    storage_provider_config_options = []
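    # Each branch below translates Harbor's storage_provider_config keys into
    # the KEY=VALUE options the chart server reads from its environment. As an
    # illustration (made-up values, s3 provider), the resulting options would
    # look like:
    #   STORAGE_AMAZON_BUCKET=harbor-charts
    #   STORAGE_AMAZON_REGION=us-east-1
    #   AWS_ACCESS_KEY_ID=AKIAxxxxxxxx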
    if storage_provider_name == 's3':
        # aws s3 storage
        storage_driver = "amazon"
        storage_provider_config_options.append("STORAGE_AMAZON_BUCKET=%s" % (storage_provider_config_map.get("bucket") or ''))
        storage_provider_config_options.append("STORAGE_AMAZON_PREFIX=%s" % (storage_provider_config_map.get("rootdirectory") or ''))
        storage_provider_config_options.append("STORAGE_AMAZON_REGION=%s" % (storage_provider_config_map.get("region") or ''))
        storage_provider_config_options.append("STORAGE_AMAZON_ENDPOINT=%s" % (storage_provider_config_map.get("regionendpoint") or ''))
        storage_provider_config_options.append("AWS_ACCESS_KEY_ID=%s" % (storage_provider_config_map.get("accesskey") or ''))
        storage_provider_config_options.append("AWS_SECRET_ACCESS_KEY=%s" % (storage_provider_config_map.get("secretkey") or ''))
    elif storage_provider_name == 'gcs':
        # google cloud storage
        storage_driver = "google"
        storage_provider_config_options.append("STORAGE_GOOGLE_BUCKET=%s" % (storage_provider_config_map.get("bucket") or ''))
        storage_provider_config_options.append("STORAGE_GOOGLE_PREFIX=%s" % (storage_provider_config_map.get("rootdirectory") or ''))
        if storage_provider_config_map.get("keyfile"):
            storage_provider_config_options.append('GOOGLE_APPLICATION_CREDENTIALS=%s' % '/etc/chartserver/gcs.key')
    elif storage_provider_name == 'azure':
        # azure storage
        storage_driver = "microsoft"
        storage_provider_config_options.append("STORAGE_MICROSOFT_CONTAINER=%s" % (storage_provider_config_map.get("container") or ''))
        storage_provider_config_options.append("AZURE_STORAGE_ACCOUNT=%s" % (storage_provider_config_map.get("accountname") or ''))
        storage_provider_config_options.append("AZURE_STORAGE_ACCESS_KEY=%s" % (storage_provider_config_map.get("accountkey") or ''))
        storage_provider_config_options.append("STORAGE_MICROSOFT_PREFIX=/azure/harbor/charts")
    elif storage_provider_name == 'swift':
        # openstack swift
        storage_driver = "openstack"
        storage_provider_config_options.append("STORAGE_OPENSTACK_CONTAINER=%s" % (storage_provider_config_map.get("container") or ''))
        storage_provider_config_options.append("STORAGE_OPENSTACK_PREFIX=%s" % (storage_provider_config_map.get("rootdirectory") or ''))
        storage_provider_config_options.append("STORAGE_OPENSTACK_REGION=%s" % (storage_provider_config_map.get("region") or ''))
        storage_provider_config_options.append("OS_AUTH_URL=%s" % (storage_provider_config_map.get("authurl") or ''))
        storage_provider_config_options.append("OS_USERNAME=%s" % (storage_provider_config_map.get("username") or ''))
        storage_provider_config_options.append("OS_PASSWORD=%s" % (storage_provider_config_map.get("password") or ''))
        storage_provider_config_options.append("OS_PROJECT_ID=%s" % (storage_provider_config_map.get("tenantid") or ''))
        storage_provider_config_options.append("OS_PROJECT_NAME=%s" % (storage_provider_config_map.get("tenant") or ''))
        storage_provider_config_options.append("OS_DOMAIN_ID=%s" % (storage_provider_config_map.get("domainid") or ''))
        storage_provider_config_options.append("OS_DOMAIN_NAME=%s" % (storage_provider_config_map.get("domain") or ''))
    elif storage_provider_name == 'oss':
        # aliyun OSS
        storage_driver = "alibaba"
        bucket = storage_provider_config_map.get("bucket") or ''
        endpoint = storage_provider_config_map.get("endpoint") or ''
        # drop the leading "<bucket>." prefix from the endpoint if present
        if endpoint.startswith(bucket + "."):
            endpoint = endpoint.replace(bucket + ".", "")
        storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % bucket)
        storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % endpoint)
        storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % (storage_provider_config_map.get("rootdirectory") or ''))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % (storage_provider_config_map.get("accesskeyid") or ''))
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % (storage_provider_config_map.get("accesskeysecret") or ''))
    else:
        # use local file system
        storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")

    # generate storage provider configuration
    all_storage_provider_configs = '\n'.join(storage_provider_config_options)

    render_jinja(
        chartm_env_temp,
        chartm_env,
        cache_store=cache_store,
        cache_redis_addr=cache_redis_addr,
        cache_redis_password=cache_redis_password,
        cache_redis_db_index=cache_redis_db_index,
        core_secret=config_dict['core_secret'],
        storage_driver=storage_driver,
        all_storage_driver_configs=all_storage_provider_configs,
        public_url=config_dict['public_url'],
        chart_absolute_url=config_dict['chart_absolute_url'])
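

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only -- not part of the prepare flow).
# The keys below are exactly the ones prepare_chartmuseum() reads; every value
# is made up. Actually executing this requires the prepare environment: the
# g module's templates_dir/config_dir must point at the template tree and the
# output directory, e.g. by running `python -m utils.chart` from the prepare
# directory (an assumption; this module is normally imported by the prepare
# entry point rather than run directly).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample_config = {
        'core_secret': 'not-a-real-secret',
        'redis_host': 'redis',
        'redis_port': 6379,
        'redis_password': '',
        'redis_db_index_chart': 3,
        'storage_provider_name': 's3',
        'storage_provider_config': {
            'bucket': 'harbor-charts',
            'region': 'us-east-1',
            'rootdirectory': 'charts',
            'accesskey': 'AKIAxxxxxxxx',
            'secretkey': 'changeme',
        },
        'public_url': 'https://harbor.example.com',
        'chart_absolute_url': False,  # actual type depends on harbor.yml parsing
    }
    prepare_chartmuseum(sample_config)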