mirror of
https://github.com/goharbor/harbor.git
synced 2024-11-21 17:55:30 +01:00
Remove migrator container
1. Python 2.7 is EOL. 2. The migration script for 1.9 onward has already been moved to the prepare container. Signed-off-by: DQ <dengq@vmware.com>
This commit is contained in:
parent
f6c0608e22
commit
e786add88c
@ -1,34 +0,0 @@
|
||||
# Migrator image for upgrading legacy Harbor configuration/databases.
# NOTE(review): built on Python 2, which is EOL — this image is being retired.
FROM photon:2.0

# Data directory for the embedded PostgreSQL server used during migration.
ENV PGDATA /var/lib/postgresql/data

## have both mysql and pgsql installed.
# Single RUN layer: install tooling, the Python 2 migration stack, and
# PostgreSQL, then set up the postgres user/dirs and sample config.
RUN tdnf distro-sync -y \
    && tdnf remove -y toybox \
    && tdnf install -y sed shadow procps-ng gawk gzip sudo net-tools glibc-i18n >> /dev/null\
    && tdnf install -y python2 python2-devel python-pip gcc PyYAML python-jinja2 \
    linux-api-headers glibc-devel binutils zlib-devel openssl-devel postgresql python-psycopg2 >> /dev/null \
    && pip install alembic \
    && mkdir /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
    && chmod 777 /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
    && mkdir -p /harbor-migration \
    && touch /etc/localtime.bak \
    && groupadd -r postgres --gid=999 \
    && useradd -r -g postgres --uid=999 postgres \
    && mkdir -p /run/postgresql \
    && chown -R postgres:postgres /run/postgresql \
    && chmod 2777 /run/postgresql \
    && mkdir -p "$PGDATA" && chown -R postgres:postgres "$PGDATA" && chmod 777 "$PGDATA" \
    && sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/share/postgresql/postgresql.conf.sample \
    && sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/share/postgresql/postgresql.conf.sample \
    && touch /usr/share/locale/locale.alias \
    && locale-gen.sh en_US.UTF-8 \
    && tdnf clean all

VOLUME /var/lib/postgresql/data

WORKDIR /harbor-migration

# Migration scripts and entrypoint are copied from the build context.
COPY ./ ./

ENTRYPOINT ["./docker-entrypoint.sh"]
|
@ -1,21 +0,0 @@
|
||||
from __future__ import print_function
|
||||
import utils
|
||||
import os
|
||||
import yaml
|
||||
from jinja2 import Environment, FileSystemLoader, StrictUndefined
|
||||
|
||||
# Config versions this migrator accepts as input.
acceptable_versions = ['1.9.0']


def migrate(input_cfg, output_cfg):
    """Render the 1.9.0 configuration at *input_cfg* into the new
    harbor.yml format and write the result to *output_cfg*.
    """
    settings = utils.read_conf(input_cfg)

    # The template lives next to this module. StrictUndefined makes a
    # missing config key fail loudly instead of rendering empty text.
    env = Environment(
        loader=FileSystemLoader(os.path.dirname(__file__)),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True,
    )
    template = env.get_template('harbor.yml.jinja')

    with open(output_cfg, 'w') as out:
        out.write(template.render(**settings))
|
@ -1,342 +0,0 @@
|
||||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: {{ hostname }}
|
||||
|
||||
# http related config
|
||||
{% if http is defined %}
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: {{ http.port }}
|
||||
{% else %}
|
||||
# http:
|
||||
# # port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
# port: 80
|
||||
{% endif %}
|
||||
|
||||
{% if https is defined %}
|
||||
# https related config
|
||||
https:
|
||||
# https port for harbor, default is 443
|
||||
port: {{ https.port }}
|
||||
# The path of cert and key files for nginx
|
||||
certificate: {{ https.certificate }}
|
||||
private_key: {{ https.private_key }}
|
||||
{% else %}
|
||||
# https related config
|
||||
# https:
|
||||
# # https port for harbor, default is 443
|
||||
# port: 443
|
||||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it is enabled, the hostname will no longer be used
|
||||
{% if external_url is defined %}
|
||||
external_url: {{ external_url }}
|
||||
{% else %}
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
{% endif %}
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
{% if harbor_admin_password is defined %}
|
||||
harbor_admin_password: {{ harbor_admin_password }}
|
||||
{% else %}
|
||||
harbor_admin_password: Harbor12345
|
||||
{% endif %}
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
{% if database is defined %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: {{ database.password}}
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: {{ database.max_idle_conns or 50}}
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 100 for postgres.
|
||||
max_open_conns: {{ database.max_open_conns or 100}}
|
||||
{% else %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: root123
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 50
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 100 for postgres.
|
||||
max_open_conns: 100
|
||||
{% endif %}
|
||||
|
||||
{% if data_volume is defined %}
|
||||
# The default data volume
|
||||
data_volume: {{ data_volume }}
|
||||
{% else %}
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
{% endif %}
|
||||
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting if you want to use external storage
|
||||
{% if storage_service is defined %}
|
||||
storage_service:
|
||||
{% for key, value in storage_service.items() %}
|
||||
{% if key == 'ca_bundle' %}
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
ca_bundle: {{ value if value is not none else '' }}
|
||||
{% elif key == 'redirect' %}
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
redirect:
|
||||
disabled: {{ value.disabled }}
|
||||
{% else %}
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
{{ key }}:
|
||||
{% for k, v in value.items() %}
|
||||
{{ k }}: {{ v if v is not none else '' }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting if you want to use external storage
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disabled: false
|
||||
{% endif %}
|
||||
|
||||
# Clair configuration
|
||||
clair:
|
||||
# The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
|
||||
{% if clair is defined %}
|
||||
updaters_interval: {{ clair.updaters_interval }}
|
||||
{% else %}
|
||||
updaters_interval: 12
|
||||
{% endif %}
|
||||
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
{% if jobservice is defined %}
|
||||
max_job_workers: {{ jobservice.max_job_workers }}
|
||||
{% else %}
|
||||
max_job_workers: 10
|
||||
{% endif %}
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
{% if notification is defined %}
|
||||
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
|
||||
{% else %}
|
||||
webhook_job_max_retry: 10
|
||||
{% endif %}
|
||||
|
||||
{% if chart is defined %}
|
||||
chart:
|
||||
# Change the value of absolute_url to enabled can enable absolute url in chart
|
||||
absolute_url: {{ chart.absolute_url if chart.absolute_url == 'enabled' else 'disabled' }}
|
||||
{% else %}
|
||||
chart:
|
||||
# Change the value of absolute_url to enabled can enable absolute url in chart
|
||||
absolute_url: disabled
|
||||
{% endif %}
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
{% if log is defined %}
|
||||
level: {{ log.level }}
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: {{ log.local.rotate_count }}
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: {{ log.local.rotate_size }}
|
||||
# The directory on your host that store log
|
||||
location: {{ log.local.location }}
|
||||
{% if log.external_endpoint is defined%}
|
||||
external_endpoint:
|
||||
# protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
protocol: {{ log.external_endpoint.protocol }}
|
||||
# The host of external endpoint
|
||||
host: {{ log.external_endpoint.host }}
|
||||
# Port of external endpoint
|
||||
port: {{ log.external_endpoint.port }}
|
||||
{% else %}
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
{% else %}
|
||||
level: info
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: 50
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: 200M
|
||||
# The directory on your host that store log
|
||||
location: /var/log/harbor
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 1.10.0
|
||||
{% if external_database is defined %}
|
||||
# Uncomment external_database if using external database.
|
||||
external_database:
|
||||
harbor:
|
||||
host: {{ external_database.harbor.host }}
|
||||
port: {{ external_database.harbor.port }}
|
||||
db_name: {{ external_database.harbor.db_name }}
|
||||
username: {{ external_database.harbor.username }}
|
||||
password: {{ external_database.harbor.password }}
|
||||
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||
max_idle_conns: {{ external_database.harbor.max_idle_conns }}
|
||||
max_open_conns: {{ external_database.harbor.max_open_conns }}
|
||||
clair:
|
||||
host: {{ external_database.clair.host }}
|
||||
port: {{ external_database.clair.port }}
|
||||
db_name: {{ external_database.clair.db_name }}
|
||||
username: {{ external_database.clair.username }}
|
||||
password: {{ external_database.clair.password }}
|
||||
ssl_mode: {{ external_database.clair.ssl_mode }}
|
||||
notary_signer:
|
||||
host: {{ external_database.notary_signer.host }}
|
||||
port: {{ external_database.notary_signer.port }}
|
||||
db_name: {{external_database.notary_signer.db_name }}
|
||||
username: {{ external_database.notary_signer.username }}
|
||||
password: {{ external_database.notary_signer.password }}
|
||||
ssl_mode: {{ external_database.notary_signer.ssl_mode }}
|
||||
notary_server:
|
||||
host: {{ external_database.notary_server.host }}
|
||||
port: {{ external_database.notary_server.port }}
|
||||
db_name: {{ external_database.notary_server.db_name }}
|
||||
username: {{ external_database.notary_server.username }}
|
||||
password: {{ external_database.notary_server.password }}
|
||||
ssl_mode: {{ external_database.notary_server.ssl_mode }}
|
||||
{% else %}
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# clair:
|
||||
# host: clair_db_host
|
||||
# port: clair_db_port
|
||||
# db_name: clair_db_name
|
||||
# username: clair_db_username
|
||||
# password: clair_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_signer:
|
||||
# host: notary_signer_db_host
|
||||
# port: notary_signer_db_port
|
||||
# db_name: notary_signer_db_name
|
||||
# username: notary_signer_db_username
|
||||
# password: notary_signer_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_server:
|
||||
# host: notary_server_db_host
|
||||
# port: notary_server_db_port
|
||||
# db_name: notary_server_db_name
|
||||
# username: notary_server_db_username
|
||||
# password: notary_server_db_password
|
||||
# ssl_mode: disable
|
||||
{% endif %}
|
||||
|
||||
{% if external_redis is defined %}
|
||||
external_redis:
|
||||
host: {{ external_redis.host }}
|
||||
port: {{ external_redis.port }}
|
||||
password: {{ external_redis.password }}
|
||||
# db_index 0 is for core, it's unchangeable
|
||||
registry_db_index: {{ external_redis.registry_db_index }}
|
||||
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||
chartmuseum_db_index: {{ external_redis.chartmuseum_db_index }}
|
||||
clair_db_index: 4
|
||||
{% else %}
|
||||
# Uncomment external_redis if using an external Redis server
|
||||
# external_redis:
|
||||
# host: redis
|
||||
# port: 6379
|
||||
# password:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# chartmuseum_db_index: 3
|
||||
# clair_db_index: 4
|
||||
{% endif %}
|
||||
|
||||
{% if uaa is defined %}
|
||||
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||
uaa:
|
||||
ca_file: {{ uaa.ca_file }}
|
||||
{% else %}
|
||||
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||
# uaa:
|
||||
# ca_file: /path/to/ca
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Global proxy
|
||||
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||
# Components doesn't need to connect to each others via http proxy.
|
||||
# Remove component from `components` array if want disable proxy
|
||||
# for it. If you want use proxy for replication, MUST enable proxy
|
||||
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||
# Add domain to the `no_proxy` field, when you want disable proxy
|
||||
# for some special registry.
|
||||
{% if proxy is defined %}
|
||||
proxy:
|
||||
http_proxy: {{ proxy.http_proxy or ''}}
|
||||
https_proxy: {{ proxy.https_proxy or ''}}
|
||||
no_proxy: {{ proxy.no_proxy or ''}}
|
||||
{% if proxy.components is defined %}
|
||||
components:
|
||||
{% for component in proxy.components %}
|
||||
- {{component}}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- clair
|
||||
{% endif %}
|
@ -1,50 +0,0 @@
|
||||
from __future__ import print_function
|
||||
import utils
|
||||
import os
|
||||
acceptable_versions = ['1.2.0', '1.3.0', '1.4.0']
|
||||
|
||||
#The dict overwrite is for overwriting any value that was set in previous cfg,
|
||||
#which needs a new value in new version of .cfg
|
||||
overwrite = {
|
||||
'redis_url':'redis:6379',
|
||||
'max_job_workers':'50'
|
||||
}
|
||||
#The dict default is for filling in the values that are not set in previous config files.
|
||||
#In 1.5 template the placeholder has the same value as the attribute name.
|
||||
default = {
|
||||
'log_rotate_count':'50',
|
||||
'log_rotate_size':'200M',
|
||||
'db_host':'mysql',
|
||||
'db_port':'3306',
|
||||
'db_user':'root',
|
||||
'clair_db_host':'postgres',
|
||||
'clair_db_port':'5432',
|
||||
'clair_db_username':'postgres',
|
||||
'clair_db':'postgres',
|
||||
'uaa_endpoint':'uaa.mydomain.org',
|
||||
'uaa_clientid':'id',
|
||||
'uaa_clientsecret':'secret',
|
||||
'uaa_verify_cert':'true',
|
||||
'uaa_ca_cert':'/path/to/ca.pem',
|
||||
'registry_storage_provider_name':'filesystem',
|
||||
'registry_storage_provider_config':''
|
||||
}
|
||||
|
||||
def migrate(input_cfg, output_cfg):
|
||||
d = utils.read_conf(input_cfg)
|
||||
keys = list(default.keys())
|
||||
keys.extend(overwrite.keys())
|
||||
keys.extend(['hostname', 'ui_url_protocol', 'max_job_workers', 'customize_crt',
|
||||
'ssl_cert', 'ssl_cert_key', 'secretkey_path', 'admiral_url', 'db_password', 'clair_db_password'])
|
||||
val = {}
|
||||
for k in keys:
|
||||
if k in overwrite:
|
||||
val[k] = overwrite[k]
|
||||
elif k in d:
|
||||
val[k] = d[k]
|
||||
else:
|
||||
val[k] = default[k]
|
||||
tpl_path = os.path.join(os.path.dirname(__file__), 'harbor.cfg.tpl')
|
||||
utils.render(tpl_path, output_cfg, **val)
|
||||
|
||||
|
@ -1,179 +0,0 @@
|
||||
## Configuration file of Harbor
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version = 1.5.0
|
||||
#The IP address or hostname to access admin UI and registry service.
|
||||
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname = $hostname
|
||||
|
||||
#The protocol for accessing the UI and token/notification service, by default it is http.
|
||||
#It can be set to https if ssl is enabled on nginx.
|
||||
ui_url_protocol = $ui_url_protocol
|
||||
|
||||
#Maximum number of job workers in job service
|
||||
max_job_workers = $max_job_workers
|
||||
|
||||
#Determine whether or not to generate certificate for the registry's token.
|
||||
#If the value is on, the prepare script creates new root cert and private key
|
||||
#for generating token to access the registry. If the value is off the default key/cert will be used.
|
||||
#This flag also controls the creation of the notary signer's cert.
|
||||
customize_crt = $customize_crt
|
||||
|
||||
#The path of cert and key files for nginx, they are applied only the protocol is set to https
|
||||
ssl_cert = $ssl_cert
|
||||
ssl_cert_key = $ssl_cert_key
|
||||
|
||||
#The path of secretkey storage
|
||||
secretkey_path = $secretkey_path
|
||||
|
||||
#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone
|
||||
admiral_url = $admiral_url
|
||||
|
||||
#Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
log_rotate_count = $log_rotate_count
|
||||
#Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
#If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
#are all valid.
|
||||
log_rotate_size = $log_rotate_size
|
||||
|
||||
#Config http proxy for Clair, e.g. http://my.proxy.com:3128
|
||||
#Clair doesn't need to connect to harbor ui container via http proxy.
|
||||
http_proxy =
|
||||
https_proxy =
|
||||
no_proxy = 127.0.0.1,localhost,ui
|
||||
|
||||
#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES
|
||||
#only take effect in the first boot, the subsequent changes of these properties
|
||||
#should be performed on web ui
|
||||
|
||||
#************************BEGIN INITIAL PROPERTIES************************
|
||||
|
||||
#Email account settings for sending out password resetting emails.
|
||||
|
||||
#Email server uses the given username and password to authenticate on TLS connections to host and act as identity.
|
||||
#Identity left blank to act as username.
|
||||
email_identity =
|
||||
|
||||
email_server = smtp.mydomain.com
|
||||
email_server_port = 25
|
||||
email_username = sample_admin@mydomain.com
|
||||
email_password = abc
|
||||
email_from = admin <sample_admin@mydomain.com>
|
||||
email_ssl = false
|
||||
email_insecure = false
|
||||
|
||||
##The initial password of Harbor admin, only works for the first time when Harbor starts.
|
||||
#It has no effect after the first launch of Harbor.
|
||||
#Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password = Harbor12345
|
||||
|
||||
##By default the auth mode is db_auth, i.e. the credentials are stored in a local database.
|
||||
#Set it to ldap_auth if you want to verify a user's credentials against an LDAP server.
|
||||
auth_mode = db_auth
|
||||
|
||||
#The url for an ldap endpoint.
|
||||
ldap_url = ldaps://ldap.mydomain.com
|
||||
|
||||
#A user's DN who has the permission to search the LDAP/AD server.
|
||||
#If your LDAP/AD server does not support anonymous search, you should configure this DN and ldap_search_pwd.
|
||||
#ldap_searchdn = uid=searchuser,ou=people,dc=mydomain,dc=com
|
||||
|
||||
#the password of the ldap_searchdn
|
||||
#ldap_search_pwd = password
|
||||
|
||||
#The base DN from which to look up a user in LDAP/AD
|
||||
ldap_basedn = ou=people,dc=mydomain,dc=com
|
||||
|
||||
#Search filter for LDAP/AD, make sure the syntax of the filter is correct.
|
||||
#ldap_filter = (objectClass=person)
|
||||
|
||||
# The attribute used in a search to match a user, it could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD
|
||||
ldap_uid = uid
|
||||
|
||||
#the scope to search for users, 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_scope = 2
|
||||
|
||||
#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds.
|
||||
ldap_timeout = 5
|
||||
|
||||
#Verify certificate from LDAP server
|
||||
ldap_verify_cert = true
|
||||
|
||||
#The base dn from which to lookup a group in LDAP/AD
|
||||
ldap_group_basedn = ou=group,dc=mydomain,dc=com
|
||||
|
||||
#filter to search LDAP/AD group
|
||||
ldap_group_filter = objectclass=group
|
||||
|
||||
#The attribute used to name a LDAP/AD group, it could be cn, name
|
||||
ldap_group_gid = cn
|
||||
|
||||
#The scope to search for ldap groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_group_scope = 2
|
||||
|
||||
#Turn on or off the self-registration feature
|
||||
self_registration = on
|
||||
|
||||
#The expiration time (in minute) of token created by token service, default is 30 minutes
|
||||
token_expiration = 30
|
||||
|
||||
#The flag to control what users have permission to create projects
|
||||
#The default value "everyone" allows everyone to creates a project.
|
||||
#Set to "adminonly" so that only admin user can create project.
|
||||
project_creation_restriction = everyone
|
||||
|
||||
#************************END INITIAL PROPERTIES************************
|
||||
|
||||
#######Harbor DB configuration section#######
|
||||
|
||||
#The address of the Harbor database. Only need to change when using external db.
|
||||
db_host = $db_host
|
||||
|
||||
#The password for the root user of Harbor DB. Change this before any production use.
|
||||
db_password = $db_password
|
||||
|
||||
#The port of Harbor database host
|
||||
db_port = $db_port
|
||||
|
||||
#The user name of Harbor database
|
||||
db_user = $db_user
|
||||
|
||||
##### End of Harbor DB configuration#######
|
||||
|
||||
#The redis server address. Only needed in HA installation.
|
||||
redis_url = $redis_url
|
||||
|
||||
##########Clair DB configuration############
|
||||
|
||||
#Clair DB host address. Only change it when using an external DB.
|
||||
clair_db_host = $clair_db_host
|
||||
|
||||
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
|
||||
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
|
||||
clair_db_password = $clair_db_password
|
||||
|
||||
#Clair DB connect port
|
||||
clair_db_port = $clair_db_port
|
||||
|
||||
#Clair DB username
|
||||
clair_db_username = $clair_db_username
|
||||
|
||||
#Clair default database
|
||||
clair_db = $clair_db
|
||||
|
||||
##########End of Clair DB configuration############
|
||||
|
||||
#The following attributes only need to be set when auth mode is uaa_auth
|
||||
uaa_endpoint = $uaa_endpoint
|
||||
uaa_clientid = $uaa_clientid
|
||||
uaa_clientsecret = $uaa_clientsecret
|
||||
uaa_verify_cert = $uaa_verify_cert
|
||||
uaa_ca_cert = $uaa_ca_cert
|
||||
|
||||
|
||||
### Docker Registry setting ###
|
||||
#registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
|
||||
registry_storage_provider_name = $registry_storage_provider_name
|
||||
#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
|
||||
#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
|
||||
registry_storage_provider_config = $registry_storage_provider_config
|
@ -1,59 +0,0 @@
|
||||
from __future__ import print_function
|
||||
import utils
|
||||
import os
|
||||
acceptable_versions = ['1.5.0']
|
||||
keys = [
|
||||
'hostname',
|
||||
'ui_url_protocol',
|
||||
'customize_crt',
|
||||
'ssl_cert',
|
||||
'ssl_cert_key',
|
||||
'secretkey_path',
|
||||
'admiral_url',
|
||||
'log_rotate_count',
|
||||
'log_rotate_size',
|
||||
'http_proxy',
|
||||
'https_proxy',
|
||||
'no_proxy',
|
||||
'db_host',
|
||||
'db_password',
|
||||
'db_port',
|
||||
'db_user',
|
||||
'clair_db_host',
|
||||
'clair_db_password',
|
||||
'clair_db_port',
|
||||
'clair_db_username',
|
||||
'clair_db',
|
||||
'uaa_endpoint',
|
||||
'uaa_clientid',
|
||||
'uaa_clientsecret',
|
||||
'uaa_verify_cert',
|
||||
'uaa_ca_cert',
|
||||
'registry_storage_provider_name',
|
||||
'registry_storage_provider_config'
|
||||
]
|
||||
|
||||
def migrate(input_cfg, output_cfg):
|
||||
d = utils.read_conf(input_cfg)
|
||||
val = {}
|
||||
for k in keys:
|
||||
val[k] = d.get(k,'')
|
||||
#append registry to no_proxy
|
||||
np_list = d.get('no_proxy','').split(',')
|
||||
if not 'registry' in np_list:
|
||||
np_list.append('registry')
|
||||
val['no_proxy'] = ','.join(np_list)
|
||||
#handle harbor db information, if it previously pointed to internal mariadb, point it to the new default db instance of pgsql,
|
||||
#update user to default pgsql user.
|
||||
if 'mysql' == d['db_host']:
|
||||
val['db_host'] = 'postgresql'
|
||||
val['db_port'] = '5432'
|
||||
val['db_user'] = 'postgres'
|
||||
#handle clair db information, if it pointed to internal pgsql in previous deployment, point it to the new default db instance of pgsql,
|
||||
#the user should be the same user as harbor db
|
||||
if 'postgres' == d['clair_db_host']:
|
||||
val['clair_db_host'] = 'postgresql'
|
||||
val['cliar_db_user'] = val['db_user']
|
||||
val['clair_db_password'] = val['db_password']
|
||||
tpl_path = os.path.join(os.path.dirname(__file__), 'harbor.cfg.tpl')
|
||||
utils.render(tpl_path, output_cfg, **val)
|
@ -1,200 +0,0 @@
|
||||
## Configuration file of Harbor
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version = 1.6.0
|
||||
#The IP address or hostname to access admin UI and registry service.
|
||||
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname = $hostname
|
||||
|
||||
#The protocol for accessing the UI and token/notification service, by default it is http.
|
||||
#It can be set to https if ssl is enabled on nginx.
|
||||
ui_url_protocol = $ui_url_protocol
|
||||
|
||||
#Maximum number of job workers in job service
|
||||
max_job_workers = 10
|
||||
|
||||
#Determine whether or not to generate certificate for the registry's token.
|
||||
#If the value is on, the prepare script creates new root cert and private key
|
||||
#for generating token to access the registry. If the value is off the default key/cert will be used.
|
||||
#This flag also controls the creation of the notary signer's cert.
|
||||
customize_crt = $customize_crt
|
||||
|
||||
#The path of cert and key files for nginx, they are applied only the protocol is set to https
|
||||
ssl_cert = $ssl_cert
|
||||
ssl_cert_key = $ssl_cert_key
|
||||
|
||||
#The path of secretkey storage
|
||||
secretkey_path = $secretkey_path
|
||||
|
||||
#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone
|
||||
admiral_url = $admiral_url
|
||||
|
||||
#Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
log_rotate_count = $log_rotate_count
|
||||
#Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
#If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
#are all valid.
|
||||
log_rotate_size = $log_rotate_size
|
||||
|
||||
#Config http proxy for Clair, e.g. http://my.proxy.com:3128
|
||||
#Clair doesn't need to connect to harbor ui container via http proxy.
|
||||
http_proxy = $http_proxy
|
||||
https_proxy = $https_proxy
|
||||
no_proxy = $no_proxy
|
||||
|
||||
#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES
|
||||
#only take effect in the first boot, the subsequent changes of these properties
|
||||
#should be performed on web ui
|
||||
|
||||
#************************BEGIN INITIAL PROPERTIES************************
|
||||
|
||||
#Email account settings for sending out password resetting emails.
|
||||
|
||||
#Email server uses the given username and password to authenticate on TLS connections to host and act as identity.
|
||||
#Identity left blank to act as username.
|
||||
email_identity =
|
||||
|
||||
email_server = smtp.mydomain.com
|
||||
email_server_port = 25
|
||||
email_username = sample_admin@mydomain.com
|
||||
email_password = abc
|
||||
email_from = admin <sample_admin@mydomain.com>
|
||||
email_ssl = false
|
||||
email_insecure = false
|
||||
|
||||
##The initial password of Harbor admin, only works for the first time when Harbor starts.
|
||||
#It has no effect after the first launch of Harbor.
|
||||
#Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password = Harbor12345
|
||||
|
||||
##By default the auth mode is db_auth, i.e. the credentials are stored in a local database.
|
||||
#Set it to ldap_auth if you want to verify a user's credentials against an LDAP server.
|
||||
auth_mode = db_auth
|
||||
|
||||
#The url for an ldap endpoint.
|
||||
ldap_url = ldaps://ldap.mydomain.com
|
||||
|
||||
#A user's DN who has the permission to search the LDAP/AD server.
|
||||
#If your LDAP/AD server does not support anonymous search, you should configure this DN and ldap_search_pwd.
|
||||
#ldap_searchdn = uid=searchuser,ou=people,dc=mydomain,dc=com
|
||||
|
||||
#the password of the ldap_searchdn
|
||||
#ldap_search_pwd = password
|
||||
|
||||
#The base DN from which to look up a user in LDAP/AD
|
||||
ldap_basedn = ou=people,dc=mydomain,dc=com
|
||||
|
||||
#Search filter for LDAP/AD, make sure the syntax of the filter is correct.
|
||||
#ldap_filter = (objectClass=person)
|
||||
|
||||
# The attribute used in a search to match a user, it could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD
|
||||
ldap_uid = uid
|
||||
|
||||
#the scope to search for users, 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_scope = 2
|
||||
|
||||
#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds.
|
||||
ldap_timeout = 5
|
||||
|
||||
#Verify certificate from LDAP server
|
||||
ldap_verify_cert = true
|
||||
|
||||
#The base dn from which to lookup a group in LDAP/AD
|
||||
ldap_group_basedn = ou=group,dc=mydomain,dc=com
|
||||
|
||||
#filter to search LDAP/AD group
|
||||
ldap_group_filter = objectclass=group
|
||||
|
||||
#The attribute used to name a LDAP/AD group, it could be cn, name
|
||||
ldap_group_gid = cn
|
||||
|
||||
#The scope to search for ldap groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_group_scope = 2
|
||||
|
||||
#Turn on or off the self-registration feature
|
||||
self_registration = on
|
||||
|
||||
#The expiration time (in minute) of token created by token service, default is 30 minutes
|
||||
token_expiration = 30
|
||||
|
||||
#The flag to control what users have permission to create projects
|
||||
#The default value "everyone" allows everyone to create a project.
|
||||
#Set to "adminonly" so that only admin user can create project.
|
||||
project_creation_restriction = everyone
|
||||
|
||||
#************************END INITIAL PROPERTIES************************
|
||||
|
||||
#######Harbor DB configuration section#######
|
||||
|
||||
#The address of the Harbor database. Only need to change when using external db.
|
||||
db_host = $db_host
|
||||
|
||||
#The password for the root user of Harbor DB. Change this before any production use.
|
||||
db_password = $db_password
|
||||
|
||||
#The port of Harbor database host
|
||||
db_port = $db_port
|
||||
|
||||
#The user name of Harbor database
|
||||
db_user = $db_user
|
||||
|
||||
##### End of Harbor DB configuration#######
|
||||
|
||||
|
||||
##########Clair DB configuration############
|
||||
|
||||
#Clair DB host address. Only change it when using an external DB.
|
||||
clair_db_host = $clair_db_host
|
||||
|
||||
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
|
||||
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
|
||||
clair_db_password = $clair_db_password
|
||||
|
||||
#Clair DB connect port
|
||||
clair_db_port = $clair_db_port
|
||||
|
||||
#Clair DB username
|
||||
clair_db_username = $clair_db_username
|
||||
|
||||
#Clair default database
|
||||
clair_db = $clair_db
|
||||
|
||||
#The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
|
||||
clair_updaters_interval = 12
|
||||
|
||||
##########End of Clair DB configuration############
|
||||
|
||||
##########Redis server configuration.############
|
||||
#Redis connection address
|
||||
redis_host = redis
|
||||
|
||||
#Redis connection port
|
||||
redis_port = 6379
|
||||
|
||||
#Redis connection password
|
||||
redis_password =
|
||||
|
||||
#Redis connection db index
|
||||
#db_index 1,2,3 is for registry, jobservice and chartmuseum.
|
||||
#db_index 0 is for UI, it's unchangeable
|
||||
redis_db_index = 1,2,3
|
||||
##########Redis server configuration.############
|
||||
|
||||
#The following attributes only need to be set when auth mode is uaa_auth
|
||||
uaa_endpoint = $uaa_endpoint
|
||||
uaa_clientid = $uaa_clientid
|
||||
uaa_clientsecret = $uaa_clientsecret
|
||||
uaa_verify_cert = $uaa_verify_cert
|
||||
uaa_ca_cert = $uaa_ca_cert
|
||||
|
||||
|
||||
### Docker Registry setting ###
|
||||
#registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
|
||||
registry_storage_provider_name = $registry_storage_provider_name
|
||||
#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
|
||||
#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
|
||||
registry_storage_provider_config = $registry_storage_provider_config
|
||||
#registry_custom_ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
#of registry's container. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
registry_custom_ca_bundle =
|
||||
|
@ -1,46 +0,0 @@
|
||||
from __future__ import print_function
|
||||
import utils
|
||||
import os
|
||||
# harbor.cfg versions this migration step accepts as input.
acceptable_versions = ['1.6.0']

# Settings copied verbatim from the 1.6.0 harbor.cfg into the 1.7.0 template.
keys = [
    'hostname',
    'ui_url_protocol',
    'customize_crt',
    'ssl_cert',
    'ssl_cert_key',
    'secretkey_path',
    'admiral_url',
    'log_rotate_count',
    'log_rotate_size',
    'http_proxy',
    'https_proxy',
    'no_proxy',
    'db_host',
    'db_password',
    'db_port',
    'db_user',
    'clair_db_host',
    'clair_db_password',
    'clair_db_port',
    'clair_db_username',
    'clair_db',
    'uaa_endpoint',
    'uaa_clientid',
    'uaa_clientsecret',
    'uaa_verify_cert',
    'uaa_ca_cert',
    'registry_storage_provider_name',
    'registry_storage_provider_config'
]


def migrate(input_cfg, output_cfg):
    """Render a 1.7.0-format harbor.cfg at *output_cfg* from the 1.6.0
    file *input_cfg*.

    Every key in ``keys`` is carried over unchanged (missing keys become
    empty strings); the rendered output comes from ``harbor.cfg.tpl``
    next to this module.
    """
    old_cfg = utils.read_conf(input_cfg)
    val = {key: old_cfg.get(key, '') for key in keys}
    # The "ui" component was renamed "core" in 1.7.0, so rewrite any
    # such entry in the comma-separated no_proxy list before rendering.
    val['no_proxy'] = ','.join(
        'core' if entry == 'ui' else entry
        for entry in old_cfg.get('no_proxy', '').split(',')
    )
    tpl_path = os.path.join(os.path.dirname(__file__), 'harbor.cfg.tpl')
    utils.render(tpl_path, output_cfg, **val)
|
@ -1,204 +0,0 @@
|
||||
## Configuration file of Harbor
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version = 1.7.0
|
||||
#The IP address or hostname to access admin UI and registry service.
|
||||
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
#DO NOT comment out this line, modify the value of "hostname" directly, or the installation will fail.
|
||||
hostname = $hostname
|
||||
|
||||
#The protocol for accessing the UI and token/notification service, by default it is http.
|
||||
#It can be set to https if ssl is enabled on nginx.
|
||||
ui_url_protocol = $ui_url_protocol
|
||||
|
||||
#Maximum number of job workers in job service
|
||||
max_job_workers = 10
|
||||
|
||||
#Determine whether or not to generate certificate for the registry's token.
|
||||
#If the value is on, the prepare script creates new root cert and private key
|
||||
#for generating token to access the registry. If the value is off the default key/cert will be used.
|
||||
#This flag also controls the creation of the notary signer's cert.
|
||||
customize_crt = $customize_crt
|
||||
|
||||
#The path of cert and key files for nginx, they are applied only the protocol is set to https
|
||||
ssl_cert = $ssl_cert
|
||||
ssl_cert_key = $ssl_cert_key
|
||||
|
||||
#The path of secretkey storage
|
||||
secretkey_path = $secretkey_path
|
||||
|
||||
#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone
|
||||
admiral_url = $admiral_url
|
||||
|
||||
#Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
log_rotate_count = $log_rotate_count
|
||||
#Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
#If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
#are all valid.
|
||||
log_rotate_size = $log_rotate_size
|
||||
|
||||
#Config http proxy for Clair, e.g. http://my.proxy.com:3128
|
||||
#Clair doesn't need to connect to harbor internal components via http proxy.
|
||||
http_proxy = $http_proxy
|
||||
https_proxy = $https_proxy
|
||||
no_proxy = $no_proxy
|
||||
|
||||
#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES
|
||||
#only take effect in the first boot, the subsequent changes of these properties
|
||||
#should be performed on web ui
|
||||
|
||||
#************************BEGIN INITIAL PROPERTIES************************
|
||||
|
||||
#Email account settings for sending out password resetting emails.
|
||||
|
||||
#Email server uses the given username and password to authenticate on TLS connections to host and act as identity.
|
||||
#Identity left blank to act as username.
|
||||
email_identity =
|
||||
|
||||
email_server = smtp.mydomain.com
|
||||
email_server_port = 25
|
||||
email_username = sample_admin@mydomain.com
|
||||
email_password = abc
|
||||
email_from = admin <sample_admin@mydomain.com>
|
||||
email_ssl = false
|
||||
email_insecure = false
|
||||
|
||||
##The initial password of Harbor admin, only works for the first time when Harbor starts.
|
||||
#It has no effect after the first launch of Harbor.
|
||||
#Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password = Harbor12345
|
||||
|
||||
##By default the auth mode is db_auth, i.e. the credentials are stored in a local database.
|
||||
#Set it to ldap_auth if you want to verify a user's credentials against an LDAP server.
|
||||
auth_mode = db_auth
|
||||
|
||||
#The url for an ldap endpoint.
|
||||
ldap_url = ldaps://ldap.mydomain.com
|
||||
|
||||
#A user's DN who has the permission to search the LDAP/AD server.
|
||||
#If your LDAP/AD server does not support anonymous search, you should configure this DN and ldap_search_pwd.
|
||||
#ldap_searchdn = uid=searchuser,ou=people,dc=mydomain,dc=com
|
||||
|
||||
#the password of the ldap_searchdn
|
||||
#ldap_search_pwd = password
|
||||
|
||||
#The base DN from which to look up a user in LDAP/AD
|
||||
ldap_basedn = ou=people,dc=mydomain,dc=com
|
||||
|
||||
#Search filter for LDAP/AD, make sure the syntax of the filter is correct.
|
||||
#ldap_filter = (objectClass=person)
|
||||
|
||||
# The attribute used in a search to match a user, it could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD
|
||||
ldap_uid = uid
|
||||
|
||||
#the scope to search for users, 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_scope = 2
|
||||
|
||||
#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds.
|
||||
ldap_timeout = 5
|
||||
|
||||
#Verify certificate from LDAP server
|
||||
ldap_verify_cert = true
|
||||
|
||||
#The base dn from which to lookup a group in LDAP/AD
|
||||
ldap_group_basedn = ou=group,dc=mydomain,dc=com
|
||||
|
||||
#filter to search LDAP/AD group
|
||||
ldap_group_filter = objectclass=group
|
||||
|
||||
#The attribute used to name a LDAP/AD group, it could be cn, name
|
||||
ldap_group_gid = cn
|
||||
|
||||
#The scope to search for ldap groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE
|
||||
ldap_group_scope = 2
|
||||
|
||||
#Turn on or off the self-registration feature
|
||||
self_registration = on
|
||||
|
||||
#The expiration time (in minute) of token created by token service, default is 30 minutes
|
||||
token_expiration = 30
|
||||
|
||||
#The flag to control what users have permission to create projects
|
||||
#The default value "everyone" allows everyone to create a project.
|
||||
#Set to "adminonly" so that only admin user can create project.
|
||||
project_creation_restriction = everyone
|
||||
|
||||
#************************END INITIAL PROPERTIES************************
|
||||
|
||||
#######Harbor DB configuration section#######
|
||||
|
||||
#The address of the Harbor database. Only need to change when using external db.
|
||||
db_host = $db_host
|
||||
|
||||
#The password for the root user of Harbor DB. Change this before any production use.
|
||||
db_password = $db_password
|
||||
|
||||
#The port of Harbor database host
|
||||
db_port = $db_port
|
||||
|
||||
#The user name of Harbor database
|
||||
db_user = $db_user
|
||||
|
||||
##### End of Harbor DB configuration#######
|
||||
|
||||
##########Redis server configuration.############
|
||||
|
||||
#Redis connection address
|
||||
redis_host = redis
|
||||
|
||||
#Redis connection port
|
||||
redis_port = 6379
|
||||
|
||||
#Redis connection password
|
||||
redis_password =
|
||||
|
||||
#Redis connection db index
|
||||
#db_index 1,2,3 is for registry, jobservice and chartmuseum.
|
||||
#db_index 0 is for UI, it's unchangeable
|
||||
redis_db_index = 1,2,3
|
||||
|
||||
########## End of Redis server configuration ############
|
||||
|
||||
##########Clair DB configuration############
|
||||
|
||||
#Clair DB host address. Only change it when using an external DB.
|
||||
clair_db_host = $clair_db_host
|
||||
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
|
||||
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
|
||||
clair_db_password = $clair_db_password
|
||||
#Clair DB connect port
|
||||
clair_db_port = $clair_db_port
|
||||
#Clair DB username
|
||||
clair_db_username = $clair_db_username
|
||||
#Clair default database
|
||||
clair_db = $clair_db
|
||||
|
||||
#The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
|
||||
clair_updaters_interval = 12
|
||||
|
||||
##########End of Clair DB configuration############
|
||||
|
||||
#The following attributes only need to be set when auth mode is uaa_auth
|
||||
uaa_endpoint = $uaa_endpoint
|
||||
uaa_clientid = $uaa_clientid
|
||||
uaa_clientsecret = $uaa_clientsecret
|
||||
uaa_verify_cert = $uaa_verify_cert
|
||||
uaa_ca_cert = $uaa_ca_cert
|
||||
|
||||
|
||||
### Harbor Storage settings ###
|
||||
#Please be aware that the following storage settings will be applied to both docker registry and helm chart repository.
|
||||
#registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
|
||||
registry_storage_provider_name = $registry_storage_provider_name
|
||||
#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
|
||||
#To avoid duplicated configurations, both docker registry and chart repository follow the same storage configuration specifications of docker registry.
|
||||
#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
|
||||
registry_storage_provider_config = $registry_storage_provider_config
|
||||
#registry_custom_ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
#of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
registry_custom_ca_bundle =
|
||||
|
||||
#If reload_config=true, all settings which are present in harbor.cfg take effect after prepare and restart harbor; it overwrites existing settings.
|
||||
#reload_config=true
|
||||
#Regular expression to match skipped environment variables
|
||||
#skip_reload_env_pattern=(^EMAIL.*)|(^LDAP.*)
|
@ -1,64 +0,0 @@
|
||||
from __future__ import print_function
|
||||
import utils
|
||||
import os
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
# harbor.cfg versions this migration step accepts as input.
acceptable_versions = ['1.7.0']

# Settings read from the 1.7.0 harbor.cfg and fed to the 1.8.0 template.
keys = [
    'hostname',
    'ui_url_protocol',
    'ssl_cert',
    'ssl_cert_key',
    'admiral_url',
    'log_rotate_count',
    'log_rotate_size',
    'http_proxy',
    'https_proxy',
    'no_proxy',
    'db_host',
    'db_password',
    'db_port',
    'db_user',
    'clair_db_host',
    'clair_db_password',
    'clair_db_port',
    'clair_db_username',
    'clair_db',
    'redis_host',
    'redis_port',
    'redis_password',
    'redis_db_index',
    'clair_updaters_interval',
    'max_job_workers',
    'registry_storage_provider_name',
    'registry_storage_provider_config',
    'registry_custom_ca_bundle'
]


def migrate(input_cfg, output_cfg):
    """Render a 1.8.0-format harbor.yml at *output_cfg* from the 1.7.0
    harbor.cfg *input_cfg*, using the ``harbor.yml.jinja`` template next
    to this module.
    """
    cfg = utils.read_conf(input_cfg)
    val = {key: cfg.get(key, '') for key in keys}

    # A db_host/db_port of postgresql:5432 is the bundled database; any
    # other address means the user configured an external DB.
    val['external_db'] = not (
        val['db_host'] == 'postgresql' and val['db_port'] == '5432'
    )

    # The default filesystem backend with no extra settings needs no
    # storage_service section in the rendered yml.
    provider_name = val['registry_storage_provider_name']
    provider_config = val.get('registry_storage_provider_config')
    if provider_name == 'filesystem' and not provider_config:
        val['storage_provider_info'] = ''
    else:
        val['storage_provider_info'] = utils.get_storage_provider_info(
            provider_name,
            val['registry_storage_provider_config']
        )

    # redis:6379 is the bundled Redis; anything else is external, in
    # which case the per-component db indexes are split out for the
    # template's external_redis section.
    if val['redis_host'] == 'redis' and val['redis_port'] == '6379':
        val['external_redis'] = False
    else:
        indexes = [int(i) for i in val['redis_db_index'].split(',')]
        (val['registry_db_index'],
         val['jobservice_db_index'],
         val['chartmuseum_db_index']) = indexes
        val['external_redis'] = True

    this_dir = os.path.dirname(__file__)
    tpl = Environment(loader=FileSystemLoader(this_dir)).get_template('harbor.yml.jinja')

    with open(output_cfg, 'w') as out:
        out.write(tpl.render(**val))
|
@ -1,136 +0,0 @@
|
||||
## Configuration file of Harbor
|
||||
|
||||
#The IP address or hostname to access admin UI and registry service.
|
||||
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: {{hostname}}
|
||||
|
||||
# http related config
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: 80
|
||||
|
||||
{% if ui_url_protocol == 'https'%}
|
||||
https:
|
||||
port: 443
|
||||
#The path of cert and key files for nginx
|
||||
certificate: {{ ssl_cert }}
|
||||
private_key: {{ ssl_cert_key }}
|
||||
{% else %}
|
||||
# https:
|
||||
# port: 443
|
||||
# #The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it is enabled the hostname will no longer be used
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password: Harbor12345
|
||||
|
||||
## Harbor DB configuration
|
||||
database:
|
||||
#The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: {{ db_password }}
|
||||
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
|
||||
# Harbor Storage settings by default is using data_volume dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
storage_service:
|
||||
# ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
ca_bundle: {{ registry_custom_ca_bundle }}
|
||||
|
||||
{{storage_provider_info}}
|
||||
|
||||
# Clair configuration
|
||||
clair:
|
||||
# The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
|
||||
updaters_interval: {{ clair_updaters_interval }}
|
||||
|
||||
# Config http proxy for Clair, e.g. http://my.proxy.com:3128
|
||||
# Clair doesn't need to connect to harbor internal components via http proxy.
|
||||
http_proxy: {{ http_proxy }}
|
||||
https_proxy: {{ https_proxy }}
|
||||
no_proxy: {{ no_proxy }}
|
||||
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
max_job_workers: {{ max_job_workers }}
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warn, error
|
||||
level: info
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: {{ log_rotate_count }}
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: {{ log_rotate_size }}
|
||||
# The directory on your host that store log
|
||||
location: /var/log/harbor
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 1.8.0
|
||||
|
||||
{% if external_db %}
|
||||
# Uncomment external_database if using external database. And the password will replace the password setting in database.
|
||||
# And currently only postgres is supported.
|
||||
external_database:
|
||||
harbor:
|
||||
host: {{ db_host }}
|
||||
port: {{ db_port }}
|
||||
db_name: registry
|
||||
username: {{ db_user }}
|
||||
password: {{ db_password }}
|
||||
ssl_mode: disable
|
||||
clair:
|
||||
host: {{ clair_db_host }}
|
||||
port: {{ clair_db_port }}
|
||||
db_name: {{ clair_db }}
|
||||
username: {{ clair_db_username }}
|
||||
password: {{ clair_db_password }}
|
||||
ssl_mode: disable
|
||||
notary_signer:
|
||||
host: {{ db_host }}
|
||||
port: {{ db_port }}
|
||||
db_name: notarysigner
|
||||
username: {{ db_user }}
|
||||
password: {{ db_password }}
|
||||
ssl_mode: disable
|
||||
notary_server:
|
||||
host: {{ db_host }}
|
||||
port: {{ db_port }}
|
||||
db_name: notaryserver
|
||||
username: {{ db_user }}
|
||||
password: {{ db_password }}
|
||||
ssl_mode: disable
|
||||
{% endif %}
|
||||
|
||||
{% if external_redis %}
|
||||
external_redis:
|
||||
host: {{ redis_host }}
|
||||
port: {{ redis_port }}
|
||||
password: {{ redis_password }}
|
||||
# db_index 0 is for core, it's unchangeable
|
||||
registry_db_index: {{ registry_db_index }}
|
||||
jobservice_db_index: {{ jobservice_db_index }}
|
||||
chartmuseum_db_index: {{ chartmuseum_db_index }}
|
||||
{% else %}
|
||||
# Uncomment external_redis if using an external Redis server
|
||||
# external_redis:
|
||||
# host: redis
|
||||
# port: 6379
|
||||
# password:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# chartmuseum_db_index: 3
|
||||
{% endif %}
|
@ -1,21 +0,0 @@
|
||||
from __future__ import print_function
|
||||
import utils
|
||||
import os
|
||||
import yaml
|
||||
from jinja2 import Environment, FileSystemLoader, StrictUndefined
|
||||
|
||||
# harbor.yml versions this migration step accepts as input.
acceptable_versions = ['1.8.0']


def migrate(input_cfg, output_cfg):
    """Render a 1.9.0-format harbor.yml at *output_cfg* from the 1.8.0
    file *input_cfg*.

    The whole parsed configuration is passed straight into the
    ``harbor.yml.jinja`` template next to this module; StrictUndefined
    makes the render fail loudly on any key the template expects but the
    input lacks.
    """
    config_dict = utils.read_conf(input_cfg)

    env = Environment(
        loader=FileSystemLoader(os.path.dirname(__file__)),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True
    )
    rendered = env.get_template('harbor.yml.jinja').render(**config_dict)

    with open(output_cfg, 'w') as out:
        out.write(rendered)
|
@ -1,270 +0,0 @@
|
||||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: {{ hostname }}
|
||||
|
||||
# http related config
|
||||
{% if http %}
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: {{ http.port }}
|
||||
{% endif %}
|
||||
|
||||
{% if https is defined %}
|
||||
# https related config
|
||||
https:
|
||||
# https port for harbor, default is 443
|
||||
port: {{ https.port }}
|
||||
# The path of cert and key files for nginx
|
||||
certificate: {{ https.certificate }}
|
||||
private_key: {{ https.private_key }}
|
||||
{% else %}
|
||||
# https related config
|
||||
# https:
|
||||
# # https port for harbor, default is 443
|
||||
# port: 443
|
||||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it is enabled the hostname will no longer be used
|
||||
{% if external_url is defined %}
|
||||
external_url: {{ external_url }}
|
||||
{% else %}
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
{% endif %}
|
||||
|
||||
{% if harbor_admin_password is defined %}
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password: {{ harbor_admin_password }}
|
||||
{% endif %}
|
||||
|
||||
{% if database is defined %}
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: {{ database.password}}
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 50
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 100 for postgres.
|
||||
max_open_conns: 100
|
||||
{% endif %}
|
||||
|
||||
{% if data_volume is defined %}
|
||||
# The default data volume
|
||||
data_volume: {{ data_volume }}
|
||||
{% endif %}
|
||||
|
||||
{% if storage_service is defined %}
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
storage_service:
|
||||
{% for key, value in storage_service.items() %}
|
||||
{% if key == 'ca_bundle' %}
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
ca_bundle: {{ value if value is not none else '' }}
|
||||
{% elif key == 'redirect' %}
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
redirect:
|
||||
disabled: {{ value.disabled }}
|
||||
{% else %}
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
{{ key }}:
|
||||
{% for k, v in value.items() %}
|
||||
{{ k }}: {{ v if v is not none else '' }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disabled: false
|
||||
{% endif %}
|
||||
|
||||
{% if clair is defined %}
|
||||
# Clair configuration
|
||||
clair:
|
||||
# The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
|
||||
updaters_interval: {{ clair.updaters_interval }}
|
||||
{% endif %}
|
||||
|
||||
{% if jobservice is defined %}
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
max_job_workers: {{ jobservice.max_job_workers }}
|
||||
{% endif %}
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
webhook_job_max_retry: 10
|
||||
|
||||
{% if chart is defined %}
|
||||
chart:
|
||||
# Change the value of absolute_url to enabled can enable absolute url in chart
|
||||
absolute_url: {{ chart.absolute_url if chart.absolute_url == 'enabled' else 'disabled' }}
|
||||
{% endif %}
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
level: {{ log.level }}
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: {{ log.rotate_count }}
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: {{ log.rotate_size }}
|
||||
# The directory on your host that store log
|
||||
location: {{ log.location }}
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 1.9.0
|
||||
|
||||
{% if external_database is defined %}
|
||||
# Uncomment external_database if using external database.
|
||||
external_database:
|
||||
harbor:
|
||||
host: {{ external_database.harbor.host }}
|
||||
port: {{ external_database.harbor.port }}
|
||||
db_name: {{ external_database.harbor.db_name }}
|
||||
username: {{ external_database.harbor.username }}
|
||||
password: {{ external_database.harbor.password }}
|
||||
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||
max_idle_conns: 2
|
||||
max_open_conns: 0
|
||||
clair:
|
||||
host: {{ external_database.clair.host }}
|
||||
port: {{ external_database.clair.port }}
|
||||
db_name: {{ external_database.clair.db_name }}
|
||||
username: {{ external_database.clair.username }}
|
||||
password: {{ external_database.clair.password }}
|
||||
ssl_mode: {{ external_database.clair.ssl_mode }}
|
||||
notary_signer:
|
||||
host: {{ external_database.notary_signer.host }}
|
||||
port: {{ external_database.notary_signer.port }}
|
||||
db_name: {{external_database.notary_signer.db_name }}
|
||||
username: {{ external_database.notary_signer.username }}
|
||||
password: {{ external_database.notary_signer.password }}
|
||||
ssl_mode: {{ external_database.notary_signer.ssl_mode }}
|
||||
notary_server:
|
||||
host: {{ external_database.notary_server.host }}
|
||||
port: {{ external_database.notary_server.port }}
|
||||
db_name: {{ external_database.notary_server.db_name }}
|
||||
username: {{ external_database.notary_server.username }}
|
||||
password: {{ external_database.notary_server.password }}
|
||||
ssl_mode: {{ external_database.notary_server.ssl_mode }}
|
||||
{% else %}
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# clair:
|
||||
# host: clair_db_host
|
||||
# port: clair_db_port
|
||||
# db_name: clair_db_name
|
||||
# username: clair_db_username
|
||||
# password: clair_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_signer:
|
||||
# host: notary_signer_db_host
|
||||
# port: notary_signer_db_port
|
||||
# db_name: notary_signer_db_name
|
||||
# username: notary_signer_db_username
|
||||
# password: notary_signer_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_server:
|
||||
# host: notary_server_db_host
|
||||
# port: notary_server_db_port
|
||||
# db_name: notary_server_db_name
|
||||
# username: notary_server_db_username
|
||||
# password: notary_server_db_password
|
||||
# ssl_mode: disable
|
||||
{% endif %}
|
||||
|
||||
{% if external_redis is defined %}
|
||||
external_redis:
|
||||
host: {{ external_redis.host }}
|
||||
port: {{ external_redis.port }}
|
||||
password: {{ external_redis.password }}
|
||||
# db_index 0 is for core, it's unchangeable
|
||||
registry_db_index: {{ external_redis.registry_db_index }}
|
||||
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||
chartmuseum_db_index: {{ external_redis.chartmuseum_db_index }}
|
||||
{% else %}
|
||||
# Uncomment external_redis if using an external Redis server
|
||||
# external_redis:
|
||||
# host: redis
|
||||
# port: 6379
|
||||
# password:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# chartmuseum_db_index: 3
|
||||
{% endif %}
|
||||
|
||||
{% if uaa is defined %}
|
||||
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||
uaa:
|
||||
ca_file: {{ uaa.ca_file }}
|
||||
{% endif %}
|
||||
|
||||
# Global proxy
|
||||
# Configure an http proxy for components, e.g. http://my.proxy.com:3128
# Components do not need an http proxy to connect to each other.
# Remove a component from the `components` array to disable the proxy
# for it. If you want to use the proxy for replication, you MUST enable
# the proxy for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field to disable the proxy
# for that registry.
|
||||
{% if clair is defined and (clair.http_proxy or clair.https_proxy) %}
|
||||
proxy:
|
||||
http_proxy: {{ clair.http_proxy or ''}}
|
||||
https_proxy: {{ clair.https_proxy or ''}}
|
||||
no_proxy: {{ clair.no_proxy or ''}}
|
||||
components:
|
||||
- clair
|
||||
{% else %}
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- clair
|
||||
{% endif %}
|
@ -1,70 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
from __future__ import print_function
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import utils
|
||||
import importlib
|
||||
import glob
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
def main():
    """CLI entry point: migrate a harbor.cfg/harbor.yml to the target version.

    Resolves a chain of per-version migrator modules from the input
    version to the target version, applies them in order through
    temporary files, then copies the final result to the output path.
    Exits 0 when no migration is needed, 1 on any resolution error.
    """
    parser = argparse.ArgumentParser(description='migrator of harbor.cfg')
    parser.add_argument('--input', '-i', action="store", dest='input_path', required=True, help='The path to the old harbor.cfg that provides input value, this required value')
    parser.add_argument('--output','-o', action="store", dest='output_path', required=False, help='The path of the migrated harbor.cfg, if not set the input file will be overwritten')
    parser.add_argument('--target', action="store", dest='target_version', help='The target version that the harbor.cfg will be migrated to.')
    args = parser.parse_args()

    # Fall back to the default target when --target is not supplied.
    target_version = args.target_version if args.target_version is not None else '1.10.0'
    input_version = utils.get_conf_version(args.input_path)
    curr_dir = os.path.dirname(__file__)

    if input_version == target_version:
        print("Version of input harbor.yml is identical to target %s, no need to upgrade" % input_version)
        sys.exit(0)

    if args.output_path is None:
        # Crossing the 1.8 boundary changes the file format (cfg -> yml),
        # so overwriting the input in place is not allowed.
        crosses_1_8 = (float(input_version[:input_version.rfind('.')]) < 1.8
                       and float(target_version[:target_version.rfind('.')]) >= 1.8)
        if crosses_1_8:
            print("Migrating config file from < 1.8.0 to >= 1.8.0, the output path cannot be empty")
            sys.exit(1)
        args.output_path = args.input_path

    chain = []
    if not search(curr_dir, input_version, target_version, chain):
        print("No migrator for version: %s" % input_version)
        sys.exit(1)

    print("input version: %s, migrator chain: %s" % (input_version, chain))
    step_input = args.input_path
    step_output = None
    for ver in chain:
        migrator = importlib.import_module(to_module_path(ver))
        step_output = "harbor.cfg.%s.tmp" % ver
        print("migrating to version %s" % ver)
        migrator.migrate(step_input, step_output)
        step_input = step_output
    shutil.copy(step_output, args.output_path)
    print("Written new values to %s" % args.output_path)
    # Clean up all intermediate per-version temp files.
    for tmp_f in glob.glob("harbor.cfg.*.tmp"):
        os.remove(tmp_f)
|
||||
|
||||
def to_module_path(ver):
    """Map a version string such as '1.9.0' to its migrator module name."""
    return "migrator_" + ver.replace(".", "_")
|
||||
|
||||
def search(basedir, input_ver, target_ver, l):
|
||||
module = to_module_path(target_ver)
|
||||
if os.path.isdir(os.path.join(basedir, module)):
|
||||
m = importlib.import_module(module)
|
||||
if input_ver in m.acceptable_versions:
|
||||
l.append(target_ver)
|
||||
return True
|
||||
for v in m.acceptable_versions:
|
||||
if search(basedir, input_ver, v, l):
|
||||
l.append(target_ver)
|
||||
return True
|
||||
return False
|
||||
|
||||
# Script entry point: run the config-file migration when executed directly.
if __name__ == "__main__":
    main()
|
@ -1,75 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
import sys
|
||||
import os
|
||||
import yaml
|
||||
from string import Template
|
||||
|
||||
# Python 2/3 compatibility shim: expose ConfigParser and StringIO under
# the Python 2 module names used throughout this file.
# (The original tested `sys.version_info[:3][0]` twice, which is a
# convoluted way to read the major version and left both names undefined
# on any future major version.)
if sys.version_info[0] == 2:
    import ConfigParser as ConfigParser
    import StringIO as StringIO
else:
    import configparser as ConfigParser
    import io as StringIO
|
||||
|
||||
def read_conf(path):
    """Load a Harbor config file at *path* into a dict.

    Tries YAML first (harbor.yml, >= 1.8). On a YAML parse error it falls
    back to the legacy harbor.cfg key=value format by wrapping the content
    in a synthetic INI section for ConfigParser.

    Raises:
        Exception: when a YAML document lacks the ``_version`` marker.
    """
    with open(path) as f:
        try:
            d = yaml.safe_load(f)
        except yaml.error.YAMLError:
            # Legacy harbor.cfg: section-less key = value lines; prepend a
            # temporary section header so RawConfigParser accepts it.
            f.seek(0)
            temp_section = "configuration"
            conf = StringIO.StringIO()
            conf.write("[%s]\n" % temp_section)
            conf.write(f.read())
            conf.seek(0, os.SEEK_SET)
            rcp = ConfigParser.RawConfigParser()
            # read_file() replaces readfp(), which was deprecated and removed
            # in Python 3.12; keep readfp() as the Python 2 fallback.
            if hasattr(rcp, 'read_file'):
                rcp.read_file(conf)
            else:
                rcp.readfp(conf)
            d = {}
            for op in rcp.options(temp_section):
                d[op] = rcp.get(temp_section, op)
        else:
            if "_version" not in d:
                raise Exception("Bad format configuration file: %s" % path)
    return d
|
||||
|
||||
def get_conf_version(path):
    """Return the schema version string of the config file at *path*.

    Config files >= 1.5.0 carry an explicit ``_version`` key; older ones
    are inferred from keys that first appeared in each release. Returns
    the literal string "unsupported" for anything older than 1.2.0.
    """
    d = read_conf(path)
    if "_version" in d:  # >= 1.5.0
        return d["_version"]
    # clair_db_password was introduced in 1.2.0; its absence means the
    # file predates every version this migrator knows how to handle.
    if "clair_db_password" not in d:
        return "unsupported"
    if "registry_storage_provider_name" in d:
        return "1.4.0"
    if "uaa_endpoint" in d:
        return "1.3.0"
    return "1.2.0"
|
||||
|
||||
def render(src, dest, **kw):
    """Render template file *src* with string.Template substitution of **kw
    and write the result to *dest*.

    Raises:
        KeyError: if the template references a placeholder missing from **kw.
    """
    # Use context managers so both file handles are closed deterministically
    # (the original leaked the source handle via open(src).read()).
    with open(src, 'r') as sf:
        t = Template(sf.read())
    with open(dest, 'w') as f:
        f.write(t.substitute(**kw))
|
||||
|
||||
def get_storage_provider_info(provider_name, provider_config):
    """Convert a legacy one-line storage provider config to a YAML snippet.

    *provider_config* is a comma-separated "key: value" list from old
    harbor.cfg files. Returns '' when the config is empty, otherwise a
    4-space-indented block such as "s3:\\n    region: us-east-1" suitable
    for embedding under the registry storage section of harbor.yml.
    """
    provider_config = provider_config.strip('" ')
    if not provider_config.strip(" "):
        return ''

    storage_provider_cfg_map = {}
    for k_v in provider_config.split(","):
        # BUG FIX: the original wrote `if k_v > 0`, comparing a string to an
        # int (always True on Python 2, TypeError on Python 3). Test the
        # string for emptiness instead.
        if k_v:
            kvs = k_v.split(": ")  # add space suffix to avoid existing ":" in the value
            # keep only well-formed pairs whose key is non-empty
            if len(kvs) == 2 and kvs[0].strip() != "":
                storage_provider_cfg_map[kvs[0].strip()] = kvs[1].strip()

    # generate storage configuration section in yaml format
    storage_provider_conf_list = [provider_name + ':']
    for config in storage_provider_cfg_map.items():
        storage_provider_conf_list.append('{}: {}'.format(*config))
    storage_provider_info = ('\n' + ' ' * 4).join(storage_provider_conf_list)
    return storage_provider_info
|
@ -1,75 +0,0 @@
|
||||
echo "
|
||||
# A generic, single database configuration.
|
||||
|
||||
[alembic]
|
||||
# path to migration scripts
|
||||
script_location = /harbor-migration/db/alembic/postgres/migration_harbor
|
||||
|
||||
# template used to generate migration files
|
||||
# file_template = %%(rev)s_%%(slug)s
|
||||
|
||||
# timezone to use when rendering the date
|
||||
# within the migration file as well as the filename.
|
||||
# string value is passed to dateutil.tz.gettz()
|
||||
# leave blank for localtime
|
||||
# timezone =
|
||||
|
||||
# max length of characters to apply to the
|
||||
# "slug" field
|
||||
#truncate_slug_length = 40
|
||||
|
||||
# set to 'true' to run the environment during
|
||||
# the 'revision' command, regardless of autogenerate
|
||||
# revision_environment = false
|
||||
|
||||
# set to 'true' to allow .pyc and .pyo files without
|
||||
# a source .py file to be detected as revisions in the
|
||||
# versions/ directory
|
||||
# sourceless = false
|
||||
|
||||
# version location specification; this defaults
|
||||
# to migration_harbor/versions. When using multiple version
|
||||
# directories, initial revisions must be specified with --version-path
|
||||
# version_locations = %(here)s/bar %(here)s/bat migration_harbor/versions
|
||||
|
||||
# the output encoding used when revision files
|
||||
# are written from script.py.mako
|
||||
# output_encoding = utf-8
|
||||
|
||||
sqlalchemy.url = postgresql://$PGSQL_USR:$DB_PWD@localhost:5432/registry
|
||||
|
||||
|
||||
# Logging configuration
|
||||
[loggers]
|
||||
keys = root,sqlalchemy,alembic
|
||||
|
||||
[handlers]
|
||||
keys = console
|
||||
|
||||
[formatters]
|
||||
keys = generic
|
||||
|
||||
[logger_root]
|
||||
level = WARN
|
||||
handlers = console
|
||||
qualname =
|
||||
|
||||
[logger_sqlalchemy]
|
||||
level = WARN
|
||||
handlers =
|
||||
qualname = sqlalchemy.engine
|
||||
|
||||
[logger_alembic]
|
||||
level = INFO
|
||||
handlers =
|
||||
qualname = alembic
|
||||
|
||||
[handler_console]
|
||||
class = StreamHandler
|
||||
args = (sys.stderr,)
|
||||
level = NOTSET
|
||||
formatter = generic
|
||||
|
||||
[formatter_generic]
|
||||
format = %(levelname)-5.5s [%(name)s] %(message)s
|
||||
datefmt = %H:%M:%S"
|
@ -1,299 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import sessionmaker, relationship
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.sql import func
|
||||
import datetime
|
||||
|
||||
# Shared declarative base for all legacy Harbor schema models in this module.
Base = declarative_base()
|
||||
|
||||
|
||||
class User(Base):
    """Harbor user account, mapped to table ``harbor_user``."""
    __tablename__ = 'harbor_user'

    user_id = sa.Column(sa.Integer, primary_key=True)
    username = sa.Column(sa.String(255), unique=True)
    email = sa.Column(sa.String(255), unique=True)
    password = sa.Column(sa.String(40), nullable=False)
    realname = sa.Column(sa.String(255), nullable=False)
    comment = sa.Column(sa.String(30))
    # soft-delete flag: rows are retired, not physically removed
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    reset_uuid = sa.Column(sa.String(40))
    salt = sa.Column(sa.String(40))
    sysadmin_flag = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP)
    update_time = sa.Column(sa.TIMESTAMP)
|
||||
|
||||
|
||||
class UserGroup(Base):
    """A user group, optionally backed by an LDAP group DN."""
    __tablename__ = 'user_group'

    id = sa.Column(sa.Integer, primary_key=True)
    group_name = sa.Column(sa.String(255), nullable = False)
    group_type = sa.Column(sa.SmallInteger, server_default=sa.text("'0'"))
    ldap_group_dn = sa.Column(sa.String(512), nullable=False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
|
||||
|
||||
|
||||
class Properties(Base):
    """Generic key/value property store (table ``properties``)."""
    __tablename__ = 'properties'

    id = sa.Column(sa.Integer, primary_key=True)
    k = sa.Column(sa.String(64), unique=True)
    v = sa.Column(sa.String(128), nullable = False)
|
||||
|
||||
|
||||
class ProjectMember(Base):
    """Membership of a user or group (entity_type) in a project, with a role."""
    __tablename__ = 'project_member'

    id = sa.Column(sa.Integer, primary_key=True)
    project_id = sa.Column(sa.Integer(), nullable=False)
    entity_id = sa.Column(sa.Integer(), nullable=False)
    # single-character discriminator for the entity kind (user vs group)
    entity_type = sa.Column(sa.String(1), nullable=False)
    role = sa.Column(sa.Integer(), nullable = False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))

    __table_args__ = (sa.UniqueConstraint('project_id', 'entity_id', 'entity_type', name='unique_name_and_scope'),)
|
||||
|
||||
|
||||
class UserProjectRole(Base):
    """Legacy user-to-project-role link (pre-0.1.1 schema)."""
    __tablename__ = 'user_project_role'

    upr_id = sa.Column(sa.Integer(), primary_key = True)
    # NOTE(review): references 'user.user_id', while the User model above maps
    # 'harbor_user' — presumably intentional for the old schema this mirrors;
    # confirm before reusing.
    user_id = sa.Column(sa.Integer(), sa.ForeignKey('user.user_id'))
    pr_id = sa.Column(sa.Integer(), sa.ForeignKey('project_role.pr_id'))
    project_role = relationship("ProjectRole")
|
||||
|
||||
|
||||
class ProjectRole(Base):
    """Legacy project-to-role link (dropped in schema 0.1.1)."""
    __tablename__ = 'project_role'

    pr_id = sa.Column(sa.Integer(), primary_key = True)
    project_id = sa.Column(sa.Integer(), nullable = False)
    role_id = sa.Column(sa.Integer(), nullable = False)
    # NOTE(review): these bare ForeignKeyConstraint expressions are not
    # assigned to __table_args__, so they have no effect on the mapped table.
    sa.ForeignKeyConstraint(['role_id'], [u'role.role_id'])
    sa.ForeignKeyConstraint(['project_id'], [u'project.project_id'])
|
||||
|
||||
|
||||
class Access(Base):
    """Access code lookup table (single-character codes with a comment)."""
    __tablename__ = 'access'

    access_id = sa.Column(sa.Integer(), primary_key = True)
    access_code = sa.Column(sa.String(1))
    comment = sa.Column(sa.String(30))
|
||||
|
||||
|
||||
class Role(Base):
    """Role definition with a bitmask and short code."""
    __tablename__ = 'role'

    role_id = sa.Column(sa.Integer, primary_key=True)
    role_mask = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
    role_code = sa.Column(sa.String(20))
    name = sa.Column(sa.String(20))
|
||||
|
||||
|
||||
class Project(Base):
    """A Harbor project, owned by a harbor_user."""
    __tablename__ = 'project'

    project_id = sa.Column(sa.Integer, primary_key=True)
    owner_id = sa.Column(sa.ForeignKey(u'harbor_user.user_id'), nullable=False, index=True)
    name = sa.Column(sa.String(255), nullable=False, unique=True)
    creation_time = sa.Column(sa.TIMESTAMP)
    update_time = sa.Column(sa.TIMESTAMP)
    # soft-delete flag
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    owner = relationship(u'User')
|
||||
|
||||
|
||||
class ProjectMetadata(Base):
    """Per-project key/value metadata (e.g. public, severity settings)."""
    __tablename__ = 'project_metadata'

    id = sa.Column(sa.Integer, primary_key=True)
    project_id = sa.Column(sa.ForeignKey(u'project.project_id'), nullable=False)
    name = sa.Column(sa.String(255), nullable=False)
    value = sa.Column(sa.String(255))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')

    __table_args__ = (sa.UniqueConstraint('project_id', 'name', name='unique_project_id_and_name'),)
|
||||
|
||||
|
||||
class ReplicationPolicy(Base):
    """Replication policy: what to replicate from a project to a target."""
    __tablename__ = "replication_policy"

    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(256))
    project_id = sa.Column(sa.Integer, nullable=False)
    target_id = sa.Column(sa.Integer, nullable=False)
    enabled = sa.Column(sa.Boolean, nullable=False, server_default='true')
    description = sa.Column(sa.Text)
    # cron expression for scheduled replication
    cron_str = sa.Column(sa.String(256))
    filters = sa.Column(sa.String(1024))
    replicate_deletion = sa.Column(sa.Boolean, nullable=False, server_default='false')
    start_time = sa.Column(sa.TIMESTAMP)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
|
||||
|
||||
|
||||
class ReplicationTarget(Base):
    """Remote registry endpoint and credentials used by replication."""
    __tablename__ = "replication_target"

    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(64))
    url = sa.Column(sa.String(64))
    username = sa.Column(sa.String(255))
    password = sa.Column(sa.String(128))
    target_type = sa.Column(sa.SmallInteger, nullable=False, server_default=sa.text("'0'"))
    # skip TLS certificate verification when true
    insecure = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
|
||||
|
||||
|
||||
class ReplicationJob(Base):
    """One replication job execution for a policy/repository pair."""
    __tablename__ = "replication_job"

    id = sa.Column(sa.Integer, primary_key=True)
    status = sa.Column(sa.String(64), nullable=False)
    policy_id = sa.Column(sa.Integer, nullable=False)
    repository = sa.Column(sa.String(256), nullable=False)
    operation = sa.Column(sa.String(64), nullable=False)
    tags = sa.Column(sa.String(16384))
    job_uuid = sa.Column(sa.String(64))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))

    # index named 'policy' over policy_id for per-policy job lookups
    __table_args__ = (sa.Index('policy', 'policy_id'),)
|
||||
|
||||
|
||||
class ReplicationImmediateTrigger(Base):
    """Event-driven replication trigger flags for a policy and namespace."""
    __tablename__ = 'replication_immediate_trigger'

    id = sa.Column(sa.Integer, primary_key=True)
    policy_id = sa.Column(sa.Integer, nullable=False)
    namespace = sa.Column(sa.String(256), nullable=False)
    on_push = sa.Column(sa.Boolean, nullable=False, server_default='false')
    on_deletion = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
|
||||
|
||||
|
||||
class Repository(Base):
    """A container image repository inside a project."""
    __tablename__ = "repository"

    repository_id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(255), nullable=False, unique=True)
    project_id = sa.Column(sa.Integer, nullable=False)
    owner_id = sa.Column(sa.Integer, nullable=False)
    description = sa.Column(sa.Text)
    pull_count = sa.Column(sa.Integer,server_default=sa.text("'0'"), nullable=False)
    star_count = sa.Column(sa.Integer,server_default=sa.text("'0'"), nullable=False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
|
||||
|
||||
|
||||
class AccessLog(Base):
    """Audit log entry for an operation on a repository/tag."""
    __tablename__ = "access_log"

    log_id = sa.Column(sa.Integer, primary_key=True)
    username = sa.Column(sa.String(255), nullable=False)
    project_id = sa.Column(sa.Integer, nullable=False)
    repo_name = sa.Column(sa.String(256))
    repo_tag = sa.Column(sa.String(128))
    GUID = sa.Column(sa.String(64))
    operation = sa.Column(sa.String(20))
    op_time = sa.Column(sa.TIMESTAMP)

    # NOTE(review): the index is NAMED 'project_id' but covers only op_time;
    # looks like it was meant to be ('project_id', 'op_time') — confirm
    # against the original SQL schema before reusing.
    __table_args__ = (sa.Index('project_id', "op_time"),)
|
||||
|
||||
|
||||
class ImageScanJob(Base):
    """One vulnerability-scan job for an image tag."""
    __tablename__ = "img_scan_job"

    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    status = sa.Column(sa.String(64), nullable=False)
    repository = sa.Column(sa.String(256), nullable=False)
    tag = sa.Column(sa.String(128), nullable=False)
    digest = sa.Column(sa.String(128))
    job_uuid = sa.Column(sa.String(64))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
|
||||
|
||||
|
||||
class ImageScanOverview(Base):
    """Aggregated scan result (severity summary) keyed by image digest."""
    __tablename__ = "img_scan_overview"

    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    image_digest = sa.Column(sa.String(128), nullable=False)
    scan_job_id = sa.Column(sa.Integer, nullable=False)
    severity = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
    components_overview = sa.Column(sa.String(2048))
    details_key = sa.Column(sa.String(128))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
|
||||
|
||||
|
||||
class ClairVulnTimestamp(Base):
    """Last vulnerability-database update time per Clair namespace."""
    __tablename__ = "clair_vuln_timestamp"

    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    namespace = sa.Column(sa.String(128), nullable=False, unique=True)
    last_update = sa.Column(sa.TIMESTAMP)
|
||||
|
||||
|
||||
class HarborLabel(Base):
    """A label that can be attached to resources; scoped globally or per project."""
    __tablename__ = "harbor_label"

    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    name = sa.Column(sa.String(128), nullable=False)
    description = sa.Column(sa.Text)
    color = sa.Column(sa.String(16))
    level = sa.Column(sa.String(1), nullable=False)
    scope = sa.Column(sa.String(1), nullable=False)
    project_id = sa.Column(sa.Integer, nullable=False)
    # soft-delete flag (added in the 1.6.0 migration below)
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))

    __table_args__ = (sa.UniqueConstraint('name', 'scope', 'project_id', name='unique_label'),)
|
||||
|
||||
|
||||
class HarborResourceLabel(Base):
    """Attachment of a label to a resource (by id or name, typed)."""
    __tablename__ = 'harbor_resource_label'

    id = sa.Column(sa.Integer, primary_key=True)
    label_id = sa.Column(sa.Integer, nullable=False)
    resource_id = sa.Column(sa.Integer)
    resource_name = sa.Column(sa.String(256))
    resource_type = sa.Column(sa.String(1), nullable=False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))

    __table_args__ = (sa.UniqueConstraint('label_id', 'resource_id', 'resource_name', 'resource_type', name='unique_label_resource'),)
|
||||
|
||||
|
||||
class SchemaMigrations(Base):
    """Version bookkeeping table used by golang-migrate style tooling."""
    __tablename__ = 'schema_migrations'

    version = sa.Column(sa.BigInteger, primary_key=True)
    # true while a migration is in flight / failed mid-way
    dirty = sa.Column(sa.Boolean, nullable=False)
|
||||
|
||||
class AdminJob(Base):
    """Administrative job record (e.g. GC), optionally cron-scheduled."""
    __tablename__ = 'admin_job'

    id = sa.Column(sa.Integer, primary_key=True)
    job_name = sa.Column(sa.String(64), nullable=False)
    job_kind = sa.Column(sa.String(64), nullable=False)
    cron_str = sa.Column(sa.String(256))
    status = sa.Column(sa.String(64), nullable=False)
    job_uuid = sa.Column(sa.String(64))
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))

    # index named 'status' over job_uuid for lookups by job identifier
    __table_args__ = (sa.Index('status', "job_uuid"),)
|
@ -1 +0,0 @@
|
||||
Generic single-database configuration.
|
@ -1,70 +0,0 @@
|
||||
from __future__ import with_statement
|
||||
from alembic import context
|
||||
from sqlalchemy import engine_from_config, pool
|
||||
from logging.config import fileConfig
|
||||
|
||||
# this is the Alembic Config object, which provides
|
||||
# access to the values within the .ini file in use.
|
||||
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# Migrations here are hand-written, so autogenerate metadata is unused.
target_metadata = None
|
||||
|
||||
# other values from the config, defined by the needs of env.py,
|
||||
# can be acquired:
|
||||
# my_important_option = config.get_main_option("my_important_option")
|
||||
# ... etc.
|
||||
|
||||
|
||||
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    # URL comes from the sqlalchemy.url entry of the generated alembic.ini
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url, target_metadata=target_metadata, literal_binds=True)

    with context.begin_transaction():
        context.run_migrations()
|
||||
|
||||
|
||||
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    # Build the engine from the [alembic] ini section; NullPool because a
    # migration run needs exactly one short-lived connection.
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()
|
||||
|
||||
# Dispatch on the mode Alembic was invoked in (--sql => offline).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
|
@ -1,24 +0,0 @@
|
||||
"""${message}
|
||||
|
||||
Revision ID: ${up_revision}
|
||||
Revises: ${down_revision | comma,n}
|
||||
Create Date: ${create_date}
|
||||
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
${imports if imports else ""}
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = ${repr(up_revision)}
|
||||
down_revision = ${repr(down_revision)}
|
||||
branch_labels = ${repr(branch_labels)}
|
||||
depends_on = ${repr(depends_on)}
|
||||
|
||||
|
||||
def upgrade():
|
||||
${upgrades if upgrades else "pass"}
|
||||
|
||||
|
||||
def downgrade():
|
||||
${downgrades if downgrades else "pass"}
|
@ -1,39 +0,0 @@
|
||||
# Copyright Project Harbor Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Empty version
|
||||
|
||||
Revision ID: 1.5.0
|
||||
Revises:
|
||||
Create Date: 2018-6-26
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
revision = '1.5.0'
# base of the revision chain: nothing before 1.5.0
down_revision = None
branch_labels = None
depends_on = None
|
||||
|
||||
def upgrade():
    """
    update schema&data
    """
    # 1.5.0 is the baseline revision; there is nothing to change.
    pass
|
||||
|
||||
def downgrade():
    """
    Downgrade has been disabled.
    """
    pass
|
@ -1,58 +0,0 @@
|
||||
# Copyright Project Harbor Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""1.5.0 to 1.6.0
|
||||
|
||||
Revision ID: 1.6.0
|
||||
Revises:
|
||||
Create Date: 2018-6-26
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
revision = '1.6.0'
down_revision = '1.5.0'
branch_labels = None
depends_on = None
|
||||
|
||||
from alembic import op
|
||||
from db_meta import *
|
||||
|
||||
# Session factory; bound to the Alembic-provided connection inside upgrade().
Session = sessionmaker()
|
||||
|
||||
def upgrade():
    """
    update schema&data

    1.5.0 -> 1.6.0: adds the soft-delete flag to harbor_label, introduces
    the schema_migrations bookkeeping table, and creates admin_job with
    its update-time trigger.
    """
    bind = op.get_bind()
    # ORM session over the same connection Alembic is using, so the data
    # insert below joins the migration transaction.
    session = Session(bind=bind)

    ## Add column deleted to harbor_label
    op.add_column('harbor_label', sa.Column('deleted', sa.Boolean, nullable=False, server_default='false'))

    ## Add schema_migration then insert data
    SchemaMigrations.__table__.create(bind)
    session.add(SchemaMigrations(version=1, dirty=False))

    ## Add table admin_job
    AdminJob.__table__.create(bind)
    op.execute('CREATE TRIGGER admin_job_update_time_at_modtime BEFORE UPDATE ON admin_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();')

    session.commit()
|
||||
|
||||
def downgrade():
    """
    Downgrade has been disabled.
    """
    pass
|
@ -1,85 +0,0 @@
|
||||
# What's New in Harbor Database Schema
|
||||
Changelog for harbor database schema
|
||||
|
||||
## 0.1.0
|
||||
|
||||
## 0.1.1
|
||||
|
||||
- create table `project_member`
|
||||
- create table `schema_version`
|
||||
- drop table `user_project_role`
|
||||
- drop table `project_role`
|
||||
- add column `creation_time` to table `user`
|
||||
- add column `sysadmin_flag` to table `user`
|
||||
- add column `update_time` to table `user`
|
||||
- add column `role_mask` to table `role`
|
||||
- add column `update_time` to table `project`
|
||||
- delete data `AMDRWS` from table `role`
|
||||
- delete data `A` from table `access`
|
||||
|
||||
## 0.3.0
|
||||
|
||||
- create table `replication_policy`
|
||||
- create table `replication_target`
|
||||
- create table `replication_job`
|
||||
- add column `repo_tag` to table `access_log`
|
||||
- alter column `repo_name` on table `access_log`
|
||||
- alter column `email` on table `user`
|
||||
|
||||
## 0.4.0
|
||||
|
||||
- add index `pid_optime (project_id, op_time)` on table `access_log`
|
||||
- add index `poid_uptime (policy_id, update_time)` on table `replication_job`
|
||||
- add column `deleted` to table `replication_policy`
|
||||
- alter column `username` on table `user`: varchar(15)->varchar(32)
|
||||
- alter column `password` on table `replication_target`: varchar(40)->varchar(128)
|
||||
- alter column `email` on table `user`: varchar(128)->varchar(255)
|
||||
- alter column `name` on table `project`: varchar(30)->varchar(41)
|
||||
- create table `repository`
|
||||
- alter column `password` on table `replication_target`: varchar(40)->varchar(128)
|
||||
|
||||
## 1.2.0
|
||||
|
||||
- delete column `owner_id` from table `repository`
|
||||
- delete column `user_id` from table `access_log`
|
||||
- delete foreign key (user_id) references user(user_id) from table `access_log`
- delete foreign key (project_id) references project(project_id) from table `access_log`
|
||||
- add column `username` varchar (32) to table `access_log`
|
||||
- alter column `realname` on table `user`: varchar(20)->varchar(255)
|
||||
- create table `img_scan_job`
|
||||
- create table `img_scan_overview`
|
||||
- create table `clair_vuln_timestamp`
|
||||
|
||||
## 1.3.0
|
||||
|
||||
- create table `project_metadata`
|
||||
- insert data into table `project_metadata`
|
||||
- delete column `public` from table `project`
|
||||
- add column `insecure` to table `replication_target`
|
||||
|
||||
## 1.4.0
|
||||
|
||||
- add column `filters` to table `replication_policy`
|
||||
- add column `replicate_deletion` to table `replication_policy`
|
||||
- create table `replication_immediate_trigger`
|
||||
- add pk `id` to table `properties`
|
||||
- remove pk index from column 'k' of table `properties`
|
||||
- alter `name` length from 41 to 256 of table `project`
|
||||
|
||||
## 1.5.0
|
||||
|
||||
- create table `harbor_label`
|
||||
- create table `harbor_resource_label`
|
||||
- create table `user_group`
|
||||
- modify table `project_member` use `id` as PK and add column `entity_type` to indicate if the member is user or group.
|
||||
- add `job_uuid` column to `replication_job` and `img_scan_job`
|
||||
- add index `poid_status` in table replication_job
|
||||
- add index `idx_status`, `idx_digest`, `idx_repository_tag` in table img_scan_job
|
||||
|
||||
## 1.6.0
|
||||
|
||||
- add `deleted` column to table `harbor_label`
|
||||
|
||||
## 1.7.0
|
||||
|
||||
- alter column `v` on table `properties`: varchar(128)->varchar(1024)
|
@ -1,153 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright Project Harbor Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
source $PWD/db/util/pgsql.sh
|
||||
source $PWD/db/util/alembic.sh
|
||||
|
||||
set -e
|
||||
|
||||
ISPGSQL=false
|
||||
|
||||
cur_version=""
|
||||
PGSQL_USR="postgres"
|
||||
|
||||
# Verify that a postgres data volume is mounted; start the server when it is,
# abort the migration otherwise.
function init {
    if [ "$(ls -A /var/lib/postgresql/data)" ]; then
        ISPGSQL=true
    else
        echo "No database has been mounted for the migration. Use '-v' to set it in 'docker run'."
        exit 1
    fi

    launch_pgsql $PGSQL_USR
}
|
||||
|
||||
# Query the running database for its current alembic revision and store it in
# the global $cur_version (the raw query output is also left in global $result).
function get_version {
    if [ $ISPGSQL == true ]; then
        result=$(get_version_pgsql $PGSQL_USR)
    fi
    cur_version=$result
}
|
||||
|
||||
# version_le A B — succeed (status 0) when A <= B under version-sort order.
# "head" is treated as newer than every concrete version, so it is never <=.
function version_le {
    ## if no version specific, see it as larger then 1.5.0
    if [ $1 = "head" ];then
        return 1
    fi
    local lowest
    lowest="$(printf '%s\n' "$@" | sort -V | head -n 1)"
    [ "$lowest" = "$1" ]
}
|
||||
|
||||
# Back up the database, then exit the script with the captured status.
function backup {
    echo "Performing backup..."
    if [ $ISPGSQL == true ]; then
        backup_pgsql
    fi
    # NOTE(review): "$?" here is the status of the preceding "if" compound;
    # with "set -e" a failing backup_pgsql would already have aborted, so rc
    # is effectively always 0 — confirm before relying on the exit code.
    rc="$?"
    echo "Backup performed."
    exit $rc
}
|
||||
|
||||
# Restore the database from a previous backup, then exit with the status.
function restore {
    echo "Performing restore..."
    if [ $ISPGSQL == true ]; then
        restore_pgsql
    fi
    # NOTE(review): "$?" reflects the "if" compound, and "set -e" exits on a
    # failing restore_pgsql first — rc is effectively always 0 here.
    rc="$?"
    echo "Restore performed."
    exit $rc
}
|
||||
|
||||
# Test the database connection, then exit with the captured status.
function validate {
    echo "Performing test..."
    if [ $ISPGSQL == true ]; then
        test_pgsql $PGSQL_USR
    fi
    # NOTE(review): same caveat as backup/restore — "$?" is the status of the
    # "if" compound, so rc is effectively always 0 under "set -e".
    rc="$?"
    echo "Test performed."
    exit $rc
}
|
||||
|
||||
# Thin wrapper so main()'s "up|upgrade" case reads naturally; forwards the
# optional target version (may be absent) to up_harbor.
function upgrade {
    up_harbor $1
}
|
||||
|
||||
# up_harbor [TARGET_VERSION]
# Upgrade the database schema to TARGET_VERSION (defaults to "head").
# Requires the postgres data volume to be mounted; alembic does the work.
function up_harbor {
    local target_version="$1"
    if [ -z "$target_version" ]; then
        target_version="head"
        echo "Version is not specified. Default version is head."
    fi

    get_version
    if [ "$cur_version" = "$target_version" ]; then
        # BUGFIX: message was garbled ("It has always running the ...").
        echo "Harbor DB is already at version $target_version, no longer need to upgrade."
        exit 0
    fi

    # Data from 1.5.0 onward always lives in pgsql; anything else cannot be
    # migrated by this container.
    if [ $ISPGSQL != true ]; then
        echo "Please mount the database volume to /var/lib/postgresql/data, then to run the upgrade again."
        return 1
    else
        alembic_up pgsql "$target_version"
        return $?
    fi
    # NOTE: the original ended with an extra "Unsupported DB upgrade" echo and
    # "return 1" after this if/else; both branches return, so it was
    # unreachable dead code and has been removed.
}
|
||||
|
||||
# Dispatch on the first CLI argument; print usage when asked for help or when
# no argument is given.
function main {
    if [[ $1 = "help" || $1 = "h" || $# = 0 ]]; then
        echo "Usage:"
        echo "backup perform database backup"
        echo "restore perform database restore"
        echo "up, upgrade perform database schema upgrade"
        echo "test test database connection"
        echo "h, help usage help"
        exit 0
    fi

    # Bring the database up before any action runs.
    init

    case "$1" in
        up|upgrade)
            upgrade $2
            ;;
        backup)
            backup
            ;;
        restore)
            restore
            ;;
        test)
            validate
            ;;
        *)
            echo "unknown option"
            exit 0
            ;;
    esac
}

main "$@"
|
@ -1,37 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright Project Harbor Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
# alembic_up DB_TYPE TARGET_VERSION
# Render the alembic config from its template and run the schema upgrade.
# Only "pgsql" is supported; any other type aborts the script.
function alembic_up {
    local db_type="$1"
    local target_version="$2"

    if [ $db_type = "pgsql" ]; then
        export PYTHONPATH=/harbor-migration/db/alembic/postgres
        echo "TODO: add support for pgsql."
        # NOTE(review): "source ... > alembic.ini" relies on the template
        # being a script that prints the rendered config to stdout when
        # sourced — confirm alembic.tpl is built that way.
        source /harbor-migration/db/alembic/postgres/alembic.tpl > /harbor-migration/db/alembic/postgres/alembic.ini
        echo "Performing upgrade $target_version..."
        # Show the revision before and after the upgrade for the logs.
        alembic -c /harbor-migration/db/alembic/postgres/alembic.ini current
        alembic -c /harbor-migration/db/alembic/postgres/alembic.ini upgrade $target_version
        alembic -c /harbor-migration/db/alembic/postgres/alembic.ini current
    else
        echo "Unsupported DB type: $db_type"
        exit 1
    fi

    echo "Upgrade performed."
}
|
@ -1,148 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright Project Harbor Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
POSTGRES_PASSWORD=${DB_PWD}
|
||||
|
||||
# file_env VAR [DEFAULT]
# Resolve VAR from either $VAR or the file named by $VAR_FILE (docker-secrets
# convention); at most one of the two may be set. Exports the resolved value
# and removes VAR_FILE from the environment.
function file_env {
    local name="$1"
    local file_name="${name}_FILE"
    local fallback="${2:-}"

    if [ "${!name:-}" ] && [ "${!file_name:-}" ]; then
        echo >&2 "error: both $name and $file_name are set (but are exclusive)"
        exit 1
    fi

    local resolved="$fallback"
    if [ "${!name:-}" ]; then
        resolved="${!name}"
    elif [ "${!file_name:-}" ]; then
        resolved="$(< "${!file_name}")"
    fi

    export "$name"="$resolved"
    unset "$file_name"
}
|
||||
|
||||
if [ "${1:0:1}" = '-' ]; then
|
||||
set -- postgres "$@"
|
||||
fi
|
||||
|
||||
# launch_pgsql USER [PGDATA]
# Start postgres as USER. On first run (no PG_VERSION in the data dir) this
# also performs the full initialization: initdb, pg_hba auth, database/user
# creation and /docker-entrypoint-initdb.d scripts. Mirrors the upstream
# docker-library/postgres entrypoint.
function launch_pgsql {
    local pg_data=$2
    if [ -z $2 ]; then
        pg_data=$PGDATA
    fi

    if [ "$1" = 'postgres' ]; then
        chown -R postgres:postgres $pg_data
        # look specifically for PG_VERSION, as it is expected in the DB dir
        if [ ! -s "$pg_data/PG_VERSION" ]; then
            file_env 'POSTGRES_INITDB_ARGS'
            if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
                export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
            fi
            su - $1 -c "initdb -D $pg_data -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS"
            # check password first so we can output the warning before postgres
            # messes it up
            file_env 'POSTGRES_PASSWORD'
            if [ "$POSTGRES_PASSWORD" ]; then
                pass="PASSWORD '$POSTGRES_PASSWORD'"
                authMethod=md5
            else
                # The - option suppresses leading tabs but *not* spaces. :)
                echo "Use \"-e POSTGRES_PASSWORD=password\" to set the password in \"docker run\"."
                exit 1
            fi

            {
                echo
                echo "host all all all $authMethod"
            } >> "$pg_data/pg_hba.conf"
            # internal start of server in order to allow set-up using psql-client
            # does not listen on external TCP/IP and waits until start finishes
            su - $1 -c "pg_ctl -D \"$pg_data\" -o \"-c listen_addresses='localhost'\" -w start"

            file_env 'POSTGRES_USER' 'postgres'
            file_env 'POSTGRES_DB' "$POSTGRES_USER"

            psql=( psql -v ON_ERROR_STOP=1 )

            # Create the requested database when it is not the default one.
            # Heredoc bodies/terminators are at column 0 so <<- needs no tabs.
            if [ "$POSTGRES_DB" != 'postgres' ]; then
                "${psql[@]}" --username postgres <<-EOSQL
CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
                echo
            fi

            # ALTER the built-in postgres role, or CREATE a custom one.
            if [ "$POSTGRES_USER" = 'postgres' ]; then
                op='ALTER'
            else
                op='CREATE'
            fi
            "${psql[@]}" --username postgres <<-EOSQL
$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
EOSQL
            echo

            psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )

            echo
            # Run any seed scripts shipped in the image.
            for f in /docker-entrypoint-initdb.d/*; do
                case "$f" in
                    *.sh) echo "$0: running $f"; . "$f" ;;
                    *.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
                    *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
                    *) echo "$0: ignoring $f" ;;
                esac
                echo
            done

            #PGUSER="${PGUSER:-postgres}" \
            #su - $1 -c "pg_ctl -D \"$pg_data\" -m fast -w stop"

            echo
            echo 'PostgreSQL init process complete; ready for start up.'
            echo
        else
            # Data dir already initialized: just start the server.
            su - $PGSQL_USR -c "pg_ctl -D \"$pg_data\" -o \"-c listen_addresses='localhost'\" -w start"
        fi
    fi
}
|
||||
|
||||
# stop_pgsql USER [PGDATA] — shut the server down, waiting for completion.
function stop_pgsql {
    local data_dir="${2:-$PGDATA}"
    su - $1 -c "pg_ctl -D \"$data_dir\" -w stop"
}
|
||||
|
||||
# get_version_pgsql USER — print the alembic revision stored in the registry
# database (always invoked inside command substitution by get_version).
function get_version_pgsql {
    local row
    row=$(psql -U $1 -d registry -t -c "select * from alembic_version;")
    echo $row
}
|
||||
|
||||
# Placeholder: a real connectivity check was never implemented.
function test_pgsql {
    echo "TODO: needs to implement test pgsql connection..."
}
|
||||
|
||||
# Placeholder: database backup was never implemented.
function backup_pgsql {
    echo "TODO: needs to implement backup registry..."
}
|
||||
|
||||
# Placeholder: database restore was never implemented.
function restore_pgsql {
    echo "TODO: needs to implement restore registry..."
}
|
@ -1,4 +0,0 @@
|
||||
#!/bin/bash
# Container entrypoint: forward every argument straight to the migrator.
set -e

python ./migrator.py "$@"
|
@ -1,241 +0,0 @@
|
||||
from shutil import copyfile
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
|
||||
# Select stdlib module names by interpreter major version so the script can
# run under both Python 2 and Python 3.
if sys.version_info[:3][0] == 2:
    import ConfigParser as ConfigParser
    import StringIO as StringIO

if sys.version_info[:3][0] == 3:
    import configparser as ConfigParser
    import io as StringIO

# Process exit codes, one per failure mode.
RC_VALIDATE = 101      # validation of db/cfg failed
RC_UP = 102            # schema or cfg upgrade failed
RC_DOWN = 103          # downgrade failed (unused in this script)
RC_BACKUP = 104        # backup failed
RC_RESTORE = 105       # restore failed
RC_UNKNOWN_TYPE = 106  # unrecognized action keyword
RC_GEN = 110           # generic/unexpected error
|
||||
|
||||
class DBMigrator():
    """Drives database migration actions by shelling out to ./db/run.sh."""

    def __init__(self, target):
        self.target = target
        self.script = "./db/run.sh"

    def _invoke(self, action):
        # Every action succeeds iff the helper script exits with status 0.
        return run_cmd(self.script + " " + action) == 0

    def backup(self):
        """Back up the database."""
        return self._invoke("backup")

    def restore(self):
        """Restore the database from a previous backup."""
        return self._invoke("restore")

    def up(self):
        """Upgrade the schema, optionally to the configured target version."""
        action = "up"
        if self.target != '':
            action = action + " " + self.target
        return self._invoke(action)

    def validate(self):
        """Check that the database is reachable."""
        return self._invoke("test")
|
||||
|
||||
class CfgMigrator():
    """Migrates the harbor config file (harbor.cfg / harbor.yml) by delegating
    to ./cfg/run.py, plus simple copy-based backup/restore."""

    def __init__(self, target, output):
        # Fixed mount points inside the migrator container.
        cfg_dir = "/harbor-migration/harbor-cfg"
        cfg_out_dir = "/harbor-migration/harbor-cfg-out"

        self.target = target
        # Prefer an existing harbor.yml/harbor.cfg in cfg_dir; otherwise
        # assume harbor.cfg is the expected input path.
        self.cfg_path = self.__config_filepath(cfg_dir)
        if not self.cfg_path:
            self.cfg_path = os.path.join(cfg_dir, "harbor.cfg")

        # Output precedence: explicit --output, then a config file already in
        # cfg_out_dir, then the out dir itself, else "" (overwrite in place).
        if output:
            self.output = output
        elif self.__config_filepath(cfg_out_dir):
            self.output = self.__config_filepath(cfg_out_dir)
        elif os.path.isdir(cfg_out_dir):
            self.output = os.path.join(cfg_out_dir, os.path.basename(self.cfg_path))
        else:
            self.output = ""

        self.backup_path = "/harbor-migration/backup"
        self.restore_src = self.__config_filepath(self.backup_path)
        # NOTE(review): when no backup exists, restore_src is "" and
        # basename("") is "", so restore_tgt degrades to the cfg directory;
        # restore() guards on restore_src before using these.
        self.restore_tgt = os.path.join(os.path.dirname(self.cfg_path), os.path.basename(self.restore_src))

    @staticmethod
    def __config_filepath(d):
        # harbor.yml (1.8+ layout) wins over the legacy harbor.cfg.
        if os.path.isfile(os.path.join(d, "harbor.yml")):
            return os.path.join(d, "harbor.yml")
        elif os.path.isfile(os.path.join(d, "harbor.cfg")):
            return os.path.join(d, "harbor.cfg")
        return ""

    def backup(self):
        """Copy the current config file into the backup directory."""
        try:
            copyfile(self.cfg_path, os.path.join(self.backup_path, os.path.basename(self.cfg_path)))
            print ("Success to backup harbor.cfg.")
            return True
        except Exception as e:
            print ("Back up error: %s" % str(e))
            return False

    def restore(self):
        """Copy the backed-up config file next to the current one."""
        if not self.restore_src:
            print("unable to locate harbor config file in directory: %s" % self.backup_path)
            return False

        try:
            copyfile(self.restore_src, self.restore_tgt)
            print ("Success to restore harbor.cfg.")
            return True
        except Exception as e:
            print ("Restore error: %s" % str(e))
            return False

    def up(self):
        """Run the config migration tool.

        Returns True when it exits 0, or when there is no config file to
        migrate (treated as a no-op success).
        """
        if not os.path.exists(self.cfg_path):
            print ("Skip cfg up as no harbor.cfg in the path.")
            return True

        # Build the ./cfg/run.py command line from the resolved output path.
        if self.output and os.path.isdir(self.output):
            cmd = "python ./cfg/run.py --input " + self.cfg_path + " --output " + os.path.join(self.output, os.path.basename(self.cfg_path))
        elif self.output and os.path.isfile(self.output):
            cmd = "python ./cfg/run.py --input " + self.cfg_path + " --output " + self.output
        else:
            print ("The path of the migrated harbor.cfg is not set, the input file will be overwritten.")
            cmd = "python ./cfg/run.py --input " + self.cfg_path

        if self.target != '':
            cmd = cmd + " --target " + self.target
        print("Command for config file migration: %s" % cmd)
        return run_cmd(cmd) == 0

    def validate(self):
        """Check that the input config file exists."""
        if not os.path.exists(self.cfg_path):
            print ("Unable to locate the input harbor configuration file: %s, please check." % self.cfg_path)
            return False
        return True
|
||||
|
||||
class Parameters(object):
    """Collects migrator settings from environment variables and argv."""

    def __init__(self):
        self.db_user = os.getenv('DB_USR', '')
        self.db_pwd = os.getenv('DB_PWD', '')
        # 'y' skips the interactive confirmation before "up".
        self.skip_confirm = os.getenv('SKIP_CONFIRM', 'n')
        self.output = False
        self.is_migrate_db = True
        self.is_migrate_cfg = True
        self.target_version = ''
        self.action = ''
        self.init_from_input()

    def is_action(self, action):
        """Return True for the four supported action keywords."""
        if action == "test" or action == "backup" or action == "restore" or action == "up":
            return True
        else:
            return False

    def parse_input(self):
        """Parse argv: option flags first, the action keyword last.

        Returns (is_migrate_db, is_migrate_cfg, target_version, output, action).
        When the action is the only argument, both db and cfg migration
        default to enabled.
        """
        argv_len = len(sys.argv[1:])
        # The action must be the final argument; with no arguments this picks
        # up the script name, which fails is_action() and exits below.
        last_argv = sys.argv[argv_len:][0]
        if not self.is_action(last_argv):
            print ("Fail to parse input: the last parameter should in test:up:restore:backup")
            sys.exit(RC_GEN)

        # "up" is destructive, so require confirmation unless SKIP_CONFIRM=y.
        if last_argv == 'up':
            if self.skip_confirm != 'y':
                if not pass_skip_confirm():
                    sys.exit(RC_GEN)

        if argv_len == 1:
            return (True, True, '', False, last_argv)

        # NOTE(review): "upgrede" typo below is pre-existing user-facing help
        # text; also --output is declared store_true although up() treats
        # self.output as a path — verify against callers.
        parser = argparse.ArgumentParser(description='migrator of harbor')
        parser.add_argument('--db', action="store_true", dest='is_migrate_db', required=False, default=False, help='The flag to upgrade db.')
        parser.add_argument('--cfg', action="store_true", dest='is_migrate_cfg', required=False, default=False, help='The flag to upgrede cfg.')
        parser.add_argument('--version', action="store", dest='target_version', required=False, default='', help='The target version that the harbor will be migrated to.')
        parser.add_argument('--output', action="store_true", dest='output', required=False, default=False, help='The path of the migrated harbor.cfg, if not set the input file will be overwritten.')

        args = parser.parse_args(sys.argv[1:argv_len])
        args.action = last_argv
        return (args.is_migrate_db, args.is_migrate_cfg, args.target_version, args.output, args.action)

    def init_from_input(self):
        """Populate the instance fields from parse_input()."""
        (self.is_migrate_db, self.is_migrate_cfg, self.target_version, self.output, self.action) = self.parse_input()
|
||||
|
||||
def run_cmd(cmd):
    """Run *cmd* through the shell and return the raw os.system status."""
    status = os.system(cmd)
    return status
|
||||
|
||||
def pass_skip_confirm():
    """Interactively ask the operator to confirm the upgrade.

    Returns True for an affirmative reply (y/ye/yes), False for a negative
    reply (n/no) or an empty line; re-prompts on anything else.
    """
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    message = "Please backup before upgrade, \nEnter y to continue updating or n to abort: "
    # BUGFIX: raw_input() only exists on Python 2, but this script explicitly
    # supports Python 3 (see the configparser/io compat block); fall back to
    # input() so the prompt works on both interpreters.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    while True:
        sys.stdout.write(message)
        choice = read_line().lower()
        if choice == '':
            return False
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
|
||||
|
||||
def main():
    """Entry point: run the requested action against db and/or cfg migrators,
    exiting with the matching RC_* code on failure."""
    commandline_input = Parameters()

    db_migrator = DBMigrator(commandline_input.target_version)
    cfg_migrator = CfgMigrator(commandline_input.target_version, commandline_input.output)

    # sys.exit raises SystemExit (a BaseException), so the except below does
    # not swallow the deliberate exits inside the try.
    try:
        # test
        if commandline_input.action == "test":
            if commandline_input.is_migrate_db:
                if not db_migrator.validate():
                    print ("Fail to validate: please make sure your DB auth is correct.")
                    sys.exit(RC_VALIDATE)

            if commandline_input.is_migrate_cfg:
                if not cfg_migrator.validate():
                    print ("Fail to validate: please make sure your cfg path is correct.")
                    sys.exit(RC_VALIDATE)

        # backup
        elif commandline_input.action == "backup":
            if commandline_input.is_migrate_db:
                if not db_migrator.backup():
                    sys.exit(RC_BACKUP)

            if commandline_input.is_migrate_cfg:
                if not cfg_migrator.backup():
                    sys.exit(RC_BACKUP)

        # up
        elif commandline_input.action == "up":
            if commandline_input.is_migrate_db:
                if not db_migrator.up():
                    sys.exit(RC_UP)

            if commandline_input.is_migrate_cfg:
                if not cfg_migrator.up():
                    sys.exit(RC_UP)

        # restore
        elif commandline_input.action == "restore":
            if commandline_input.is_migrate_db:
                if not db_migrator.restore():
                    sys.exit(RC_RESTORE)

            if commandline_input.is_migrate_cfg:
                if not cfg_migrator.restore():
                    sys.exit(RC_RESTORE)

        else:
            # BUGFIX: "Unknow" -> "Unknown" in the error message.
            print ("Unknown action type: " + str(commandline_input.action))
            sys.exit(RC_UNKNOWN_TYPE)
    except Exception as ex:
        # BUGFIX: Exception.message was removed in Python 3, so the original
        # "ex.message" raised AttributeError inside the handler; str(ex) is
        # portable across both interpreters.
        print ("Migrator fail to execute, err: " + str(ex))
        sys.exit(RC_GEN)


if __name__ == '__main__':
    main()
|
Loading…
Reference in New Issue
Block a user