add prepare migration script for 2.12.0 (#21022)
Signed-off-by: yminer <miner.yang@broadcom.com>
parent de281220b5
commit ab59a46a87

@@ -174,7 +174,7 @@ log:
 # port: 5140
 
 #This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
-_version: 2.11.0
+_version: 2.12.0
 
 # Uncomment external_database if using external database.
 # external_database:
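
The `_version` marker bumped above is the attribute the migrator uses to detect which template generated an existing config file. A minimal sketch (not part of this commit) of reading it, assuming PyYAML is available; Harbor's own detection code may differ:

    import yaml

    # Read the version marker the migrator uses to identify the source config format.
    with open("harbor.yml") as f:
        current_version = yaml.safe_load(f).get("_version")

    print(current_version)  # e.g. '2.11.0' before this migration has been applied
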
@@ -10,7 +10,7 @@ from migrations import accept_versions
 @click.command()
 @click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
 @click.option('-o', '--output', default='', help="the path of output config file")
-@click.option('-t', '--target', default='2.11.0', help="target version of input path")
+@click.option('-t', '--target', default='2.12.0', help="target version of input path")
 def migrate(input_, output, target):
     """
     migrate command will migrate config file style to specific version
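
With the default bumped, running the migrate command without `-t/--target` now converts a config file to the 2.12.0 layout. A minimal sketch (not part of this commit) of invoking it in-process with click's test runner; the import path `commands.migrate` is an assumption for illustration, use whichever module actually defines `migrate`:

    from click.testing import CliRunner

    from commands.migrate import migrate  # assumed import path, adjust to the real module

    runner = CliRunner()
    # No -t/--target given, so the option falls back to its new default of '2.12.0'.
    result = runner.invoke(migrate, ["-i", "harbor.yml", "-o", "harbor.yml.new"])
    print(result.exit_code, result.output)
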
@@ -2,4 +2,4 @@ import os
 
 MIGRATION_BASE_DIR = os.path.dirname(__file__)
 
-accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0'}
+accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0', '2.12.0'}
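
`accept_versions` lists the config versions the migration tooling recognizes; '2.12.0' is appended so a 2.12.0-format file is accepted as a source. A minimal sketch (not part of this commit) of the kind of guard a caller might put in front of a migration:

    from migrations import accept_versions

    def ensure_supported(version: str) -> None:
        # Reject config versions that the migration tooling does not know about.
        if version not in accept_versions:
            raise SystemExit(f"unsupported config version: {version}")

    ensure_supported("2.11.0")   # fine: listed in accept_versions
    # ensure_supported("1.8.0")  # would exit: not an accepted source version
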
make/photon/prepare/migrations/version_2_12_0/__init__.py (new file, 21 lines)
@@ -0,0 +1,21 @@
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
from utils.migration import read_conf

revision = '2.12.0'
down_revisions = ['2.11.0']

def migrate(input_cfg, output_cfg):
    current_dir = os.path.dirname(__file__)
    tpl = Environment(
        loader=FileSystemLoader(current_dir),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape = select_autoescape()
        ).get_template('harbor.yml.jinja')

    config_dict = read_conf(input_cfg)

    with open(output_cfg, 'w') as f:
        f.write(tpl.render(**config_dict))
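
The new module follows the same shape as the earlier version_* migration packages: `revision`/`down_revisions` describe where this step sits in the upgrade chain, and `migrate()` re-renders the parsed config through the bundled harbor.yml.jinja. A minimal sketch (not part of this commit, and not Harbor's actual resolver) of how that metadata could be used to walk from a source version to a target:

    import importlib

    def build_chain(source: str, target: str):
        """Walk backwards from the target revision to the source via down_revisions."""
        chain = []
        current = target
        while current != source:
            mod = importlib.import_module(
                "migrations.version_{}".format(current.replace(".", "_")))
            chain.append(mod)
            current = mod.down_revisions[0]  # assume a single parent revision
        return list(reversed(chain))

    # build_chain('2.11.0', '2.12.0') would yield just [migrations.version_2_12_0],
    # whose migrate(input_cfg, output_cfg) rewrites the file into the 2.12.0 layout.
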
make/photon/prepare/migrations/version_2_12_0/harbor.yml.jinja (new file, 737 lines)
@@ -0,0 +1,737 @@
# Configuration file of Harbor

# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: {{ hostname }}

# http related config
{% if http is defined %}
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: {{ http.port }}
{% else %}
# http:
#   # port for http, default is 80. If https enabled, this port will redirect to https port
#   port: 80
{% endif %}

{% if https is defined %}
# https related config
https:
  # https port for harbor, default is 443
  port: {{ https.port }}
  # The path of cert and key files for nginx
  certificate: {{ https.certificate }}
  private_key: {{ https.private_key }}
  # enable strong ssl ciphers (default: false)
  {% if strong_ssl_ciphers is defined %}
  strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
  {% else %}
  strong_ssl_ciphers: false
  {% endif %}
{% else %}
# https related config
# https:
#   # https port for harbor, default is 443
#   port: 443
#   # The path of cert and key files for nginx
#   certificate: /your/certificate/path
#   private_key: /your/private/key/path
#   enable strong ssl ciphers (default: false)
#   strong_ssl_ciphers: false
{% endif %}

# # Harbor will set ipv4 enabled only by default if this block is not configured
# # Otherwise, please uncomment this block to configure your own ip_family stacks
{% if ip_family is defined %}
ip_family:
  # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
  {% if ip_family.ipv6 is defined %}
  ipv6:
    enabled: {{ ip_family.ipv6.enabled | lower }}
  {% else %}
  ipv6:
    enabled: false
  {% endif %}
  # ipv4Enabled set to true by default, currently it affected the nginx related component
  {% if ip_family.ipv4 is defined %}
  ipv4:
    enabled: {{ ip_family.ipv4.enabled | lower }}
  {% else %}
  ipv4:
    enabled: true
  {% endif %}
{% else %}
# ip_family:
#   # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
#   ipv6:
#     enabled: false
#   # ipv4Enabled set to true by default, currently it affected the nginx related component
#   ipv4:
#     enabled: true
{% endif %}

{% if internal_tls is defined %}
# Uncomment following will enable tls communication between all harbor components
internal_tls:
  # set enabled to true means internal tls is enabled
  enabled: {{ internal_tls.enabled | lower }}
  {% if internal_tls.dir is defined %}
  # put your cert and key files on dir
  dir: {{ internal_tls.dir }}
  {% endif %}
{% else %}
# internal_tls:
#   # set enabled to true means internal tls is enabled
#   enabled: true
#   # put your cert and key files on dir
#   dir: /etc/harbor/tls/internal
{% endif %}

# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
{% if external_url is defined %}
external_url: {{ external_url }}
{% else %}
# external_url: https://reg.mydomain.com:8433
{% endif %}

# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
{% if harbor_admin_password is defined %}
harbor_admin_password: {{ harbor_admin_password }}
{% else %}
harbor_admin_password: Harbor12345
{% endif %}

# Harbor DB configuration
database:
{% if database is defined %}
  # The password for the root user of Harbor DB. Change this before any production use.
  password: {{ database.password}}
  # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
  max_idle_conns: {{ database.max_idle_conns }}
  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgres of harbor.
  max_open_conns: {{ database.max_open_conns }}
  # The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  {% if database.conn_max_lifetime is defined %}
  conn_max_lifetime: {{ database.conn_max_lifetime }}
  {% else %}
  conn_max_lifetime: 5m
  {% endif %}
  # The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  {% if database.conn_max_idle_time is defined %}
  conn_max_idle_time: {{ database.conn_max_idle_time }}
  {% else %}
  conn_max_idle_time: 0
  {% endif %}
{% else %}
  # The password for the root user of Harbor DB. Change this before any production use.
  password: root123
  # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
  max_idle_conns: 100
  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgres of harbor.
  max_open_conns: 900
  # The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  conn_max_lifetime: 5m
  # The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  conn_max_idle_time: 0
{% endif %}

{% if data_volume is defined %}
# The default data volume
data_volume: {{ data_volume }}
{% else %}
# The default data volume
data_volume: /data
{% endif %}

# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
{% if storage_service is defined %}
storage_service:
{% for key, value in storage_service.items() %}
{% if key == 'ca_bundle' %}
  # # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
  # # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
  ca_bundle: {{ value if value is not none else '' }}
{% elif key == 'redirect' %}
  # # set disable to true when you want to disable registry redirect
  redirect:
  {% if storage_service.redirect.disabled is defined %}
    disable: {{ storage_service.redirect.disabled | lower}}
  {% else %}
    disable: {{ storage_service.redirect.disable | lower}}
  {% endif %}
{% else %}
  # # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
  # # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
  # # and https://distribution.github.io/distribution/storage-drivers/
  {{ key }}:
  {% for k, v in value.items() %}
    {{ k }}: {{ v if v is not none else '' }}
  {% endfor %}
{% endif %}
{% endfor %}
{% else %}
# storage_service:
#   # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
#   # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
#   ca_bundle:

#   # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
#   # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
#   # and https://distribution.github.io/distribution/storage-drivers/
#   filesystem:
#     maxthreads: 100
#   # set disable to true when you want to disable registry redirect
#   redirect:
#     disable: false
{% endif %}

# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
{% if trivy is defined %}
trivy:
  # ignoreUnfixed The flag to display only fixed vulnerabilities
  {% if trivy.ignore_unfixed is defined %}
  ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
  {% else %}
  ignore_unfixed: false
  {% endif %}
  # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
  #
  # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
  # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
  {% if trivy.skip_update is defined %}
  skip_update: {{ trivy.skip_update | lower }}
  {% else %}
  skip_update: false
  {% endif %}
  {% if trivy.skip_java_db_update is defined %}
  # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
  # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
  skip_java_db_update: {{ trivy.skip_java_db_update | lower }}
  {% else %}
  skip_java_db_update: false
  {% endif %}
  #
  {% if trivy.offline_scan is defined %}
  offline_scan: {{ trivy.offline_scan | lower }}
  {% else %}
  offline_scan: false
  {% endif %}
  #
  # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
  {% if trivy.security_check is defined %}
  security_check: {{ trivy.security_check }}
  {% else %}
  security_check: vuln
  {% endif %}
  #
  # insecure The flag to skip verifying registry certificate
  {% if trivy.insecure is defined %}
  insecure: {{ trivy.insecure | lower }}
  {% else %}
  insecure: false
  {% endif %}
  #
  {% if trivy.timeout is defined %}
  # timeout The duration to wait for scan completion.
  # There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
  timeout: {{ trivy.timeout}}
  {% else %}
  timeout: 5m0s
  {% endif %}
  #
  # github_token The GitHub access token to download Trivy DB
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  #
  {% if trivy.github_token is defined %}
  github_token: {{ trivy.github_token }}
  {% else %}
  # github_token: xxx
  {% endif %}
{% else %}
# trivy:
#   # ignoreUnfixed The flag to display only fixed vulnerabilities
#   ignore_unfixed: false
#   # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#   #
#   # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
#   # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
#   # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
#   skip_update: false
#   #
#   # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
#   # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
#   skip_java_db_update: false
#   #
#   #The offline_scan option prevents Trivy from sending API requests to identify dependencies.
#   # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
#   # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
#   # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
#   # It would work if all the dependencies are in local.
#   # This option doesn’t affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
#   offline_scan: false
#   #
#   # insecure The flag to skip verifying registry certificate
#   insecure: false
#   # github_token The GitHub access token to download Trivy DB
#   #
#   # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
#   # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
#   # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
#   # https://developer.github.com/v3/#rate-limiting
#   #
#   # timeout The duration to wait for scan completion.
#   # There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
#   timeout: 5m0s
#   #
#   # You can create a GitHub token by following the instructions in
#   # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#   #
#   # github_token: xxx
{% endif %}

jobservice:
  # Maximum number of job workers in job service
{% if jobservice is defined %}
  max_job_workers: {{ jobservice.max_job_workers }}
  # The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
  {% if jobservice.job_loggers is defined %}
  job_loggers:
  {% for job_logger in jobservice.job_loggers %}
    - {{job_logger}}
  {% endfor %}
  {% else %}
  job_loggers:
    - STD_OUTPUT
    - FILE
    # - DB
  {% endif %}
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
  {% if jobservice.logger_sweeper_duration is defined %}
  logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
  {% else %}
  logger_sweeper_duration: 1
  {% endif %}
{% else %}
  max_job_workers: 10
  # The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
  job_loggers:
    - STD_OUTPUT
    - FILE
    # - DB
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
  logger_sweeper_duration: 1
{% endif %}

notification:
  # Maximum retry count for webhook job
{% if notification is defined %}
  webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
  # HTTP client timeout for webhook job
  {% if notification.webhook_job_http_client_timeout is defined %}
  webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
  {% else %}
  webhook_job_http_client_timeout: 3 #seconds
  {% endif %}
{% else %}
  webhook_job_max_retry: 3
  # HTTP client timeout for webhook job
  webhook_job_http_client_timeout: 3 #seconds
{% endif %}

# Log configurations
log:
  # options are debug, info, warning, error, fatal
{% if log is defined %}
  level: {{ log.level }}
  # configs for logs in local storage
  local:
    # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
    rotate_count: {{ log.local.rotate_count }}
    # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
    # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
    # are all valid.
    rotate_size: {{ log.local.rotate_size }}
    # The directory on your host that store log
    location: {{ log.local.location }}
  {% if log.external_endpoint is defined %}
  external_endpoint:
    # protocol used to transmit log to external endpoint, options is tcp or udp
    protocol: {{ log.external_endpoint.protocol }}
    # The host of external endpoint
    host: {{ log.external_endpoint.host }}
    # Port of external endpoint
    port: {{ log.external_endpoint.port }}
  {% else %}
  # Uncomment following lines to enable external syslog endpoint.
  # external_endpoint:
  #   # protocol used to transmit log to external endpoint, options is tcp or udp
  #   protocol: tcp
  #   # The host of external endpoint
  #   host: localhost
  #   # Port of external endpoint
  #   port: 5140
  {% endif %}
{% else %}
  level: info
  # configs for logs in local storage
  local:
    # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
    rotate_count: 50
    # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
    # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
    # are all valid.
    rotate_size: 200M
    # The directory on your host that store log
    location: /var/log/harbor

  # Uncomment following lines to enable external syslog endpoint.
  # external_endpoint:
  #   # protocol used to transmit log to external endpoint, options is tcp or udp
  #   protocol: tcp
  #   # The host of external endpoint
  #   host: localhost
  #   # Port of external endpoint
  #   port: 5140
{% endif %}


#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.12.0
{% if external_database is defined %}
# Uncomment external_database if using external database.
external_database:
  harbor:
    host: {{ external_database.harbor.host }}
    port: {{ external_database.harbor.port }}
    db_name: {{ external_database.harbor.db_name }}
    username: {{ external_database.harbor.username }}
    password: {{ external_database.harbor.password }}
    ssl_mode: {{ external_database.harbor.ssl_mode }}
    max_idle_conns: {{ external_database.harbor.max_idle_conns}}
    max_open_conns: {{ external_database.harbor.max_open_conns}}
{% else %}
# Uncomment external_database if using external database.
# external_database:
#   harbor:
#     host: harbor_db_host
#     port: harbor_db_port
#     db_name: harbor_db_name
#     username: harbor_db_username
#     password: harbor_db_password
#     ssl_mode: disable
#     max_idle_conns: 2
#     max_open_conns: 0
{% endif %}

{% if redis is defined %}
redis:
  # # db_index 0 is for core, it's unchangeable
  {% if redis.registry_db_index is defined %}
  registry_db_index: {{ redis.registry_db_index }}
  {% else %}
  # # registry_db_index: 1
  {% endif %}
  {% if redis.jobservice_db_index is defined %}
  jobservice_db_index: {{ redis.jobservice_db_index }}
  {% else %}
  # # jobservice_db_index: 2
  {% endif %}
  {% if redis.trivy_db_index is defined %}
  trivy_db_index: {{ redis.trivy_db_index }}
  {% else %}
  # # trivy_db_index: 5
  {% endif %}
  {% if redis.harbor_db_index is defined %}
  harbor_db_index: {{ redis.harbor_db_index }}
  {% else %}
  # # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
  # # harbor_db_index: 6
  {% endif %}
  {% if redis.cache_layer_db_index is defined %}
  cache_layer_db_index: {{ redis.cache_layer_db_index }}
  {% else %}
  # # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
  # # cache_layer_db_index: 7
  {% endif %}
{% else %}
# Uncomment redis if need to customize redis db
# redis:
#   # db_index 0 is for core, it's unchangeable
#   # registry_db_index: 1
#   # jobservice_db_index: 2
#   # trivy_db_index: 5
#   # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
#   # harbor_db_index: 6
#   # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
#   # cache_layer_db_index: 7
{% endif %}

{% if external_redis is defined %}
external_redis:
  # support redis, redis+sentinel
  # host for redis: <host_redis>:<port_redis>
  # host for redis+sentinel:
  #  <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
  host: {{ external_redis.host }}
  password: {{ external_redis.password }}
  # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
  {% if external_redis.username is defined %}
  username: {{ external_redis.username }}
  {% else %}
  # username:
  {% endif %}
  # sentinel_master_set must be set to support redis+sentinel
  #sentinel_master_set:
  # db_index 0 is for core, it's unchangeable
  registry_db_index: {{ external_redis.registry_db_index }}
  jobservice_db_index: {{ external_redis.jobservice_db_index }}
  trivy_db_index: 5
  idle_timeout_seconds: 30
  {% if external_redis.harbor_db_index is defined %}
  harbor_db_index: {{ redis.harbor_db_index }}
  {% else %}
  # # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
  # # harbor_db_index: 6
  {% endif %}
  {% if external_redis.cache_layer_db_index is defined %}
  cache_layer_db_index: {{ redis.cache_layer_db_index }}
  {% else %}
  # # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
  # # cache_layer_db_index: 7
  {% endif %}
{% else %}
# Uncomments external_redis if using external Redis server
# external_redis:
#   # support redis, redis+sentinel
#   # host for redis: <host_redis>:<port_redis>
#   # host for redis+sentinel:
#   #  <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
#   host: redis:6379
#   password:
#   # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
#   # username:
#   # sentinel_master_set must be set to support redis+sentinel
#   #sentinel_master_set:
#   # db_index 0 is for core, it's unchangeable
#   registry_db_index: 1
#   jobservice_db_index: 2
#   trivy_db_index: 5
#   idle_timeout_seconds: 30
#   # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
#   # harbor_db_index: 6
#   # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
#   # cache_layer_db_index: 7
{% endif %}

{% if uaa is defined %}
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
uaa:
  ca_file: {{ uaa.ca_file }}
{% else %}
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
#   ca_file: /path/to/ca
{% endif %}


# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components doesn't need to connect to each others via http proxy.
# Remove component from `components` array if want disable proxy
# for it. If you want use proxy for replication, MUST enable proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add domain to the `no_proxy` field, when you want disable proxy
# for some special registry.
{% if proxy is defined %}
proxy:
  http_proxy: {{ proxy.http_proxy or ''}}
  https_proxy: {{ proxy.https_proxy or ''}}
  no_proxy: {{ proxy.no_proxy or ''}}
  {% if proxy.components is defined %}
  components:
  {% for component in proxy.components %}
  {% if component != 'clair' %}
    - {{component}}
  {% endif %}
  {% endfor %}
  {% endif %}
{% else %}
proxy:
  http_proxy:
  https_proxy:
  no_proxy:
  components:
    - core
    - jobservice
    - trivy
{% endif %}

{% if metric is defined %}
metric:
  enabled: {{ metric.enabled }}
  port: {{ metric.port }}
  path: {{ metric.path }}
{% else %}
# metric:
#   enabled: false
#   port: 9090
#   path: /metrics
{% endif %}

# Trace related config
# only can enable one trace provider(jaeger or otel) at the same time,
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
# if using jaeger agetn mode uncomment agent_host and agent_port
{% if trace is defined %}
trace:
  enabled: {{ trace.enabled | lower}}
  sample_rate: {{ trace.sample_rate }}
  # # namespace used to differentiate different harbor services
  {% if trace.namespace is defined %}
  namespace: {{ trace.namespace }}
  {% else %}
  # namespace:
  {% endif %}
  # # attributes is a key value dict contains user defined attributes used to initialize trace provider
  {% if trace.attributes is defined%}
  attributes:
  {% for name, value in trace.attributes.items() %}
    {{name}}: {{value}}
  {% endfor %}
  {% else %}
  # attributes:
  #   application: harbor
  {% endif %}
  {% if trace.jaeger is defined%}
  jaeger:
    endpoint: {{trace.jaeger.endpoint or '' }}
    username: {{trace.jaeger.username or ''}}
    password: {{trace.jaeger.password or ''}}
    agent_host: {{trace.jaeger.agent_host or ''}}
    agent_port: {{trace.jaeger.agent_port or ''}}
  {% else %}
  # jaeger:
  #   endpoint:
  #   username:
  #   password:
  #   agent_host:
  #   agent_port:
  {% endif %}
  {% if trace. otel is defined %}
  otel:
    endpoint: {{trace.otel.endpoint or '' }}
    url_path: {{trace.otel.url_path or '' }}
    compression: {{trace.otel.compression | lower }}
    insecure: {{trace.otel.insecure | lower }}
    timeout: {{trace.otel.timeout or '' }}
  {% else %}
  # otel:
  #   endpoint: hostname:4318
  #   url_path: /v1/traces
  #   compression: false
  #   insecure: true
  #   # timeout is in seconds
  #   timeout: 10
  {% endif%}
{% else %}
# trace:
#   enabled: true
#   # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
#   sample_rate: 1
#   # # namespace used to differentiate different harbor services
#   # namespace:
#   # # attributes is a key value dict contains user defined attributes used to initialize trace provider
#   # attributes:
#   #   application: harbor
#   # jaeger:
#   #   endpoint: http://hostname:14268/api/traces
#   #   username:
#   #   password:
#   #   agent_host: hostname
#   #   agent_port: 6831
#   # otel:
#   #   endpoint: hostname:4318
#   #   url_path: /v1/traces
#   #   compression: false
#   #   insecure: true
#   #   # timeout is in seconds
#   #   timeout: 10
{% endif %}

# enable purge _upload directories
{% if upload_purging is defined %}
upload_purging:
  enabled: {{ upload_purging.enabled | lower}}
  age: {{ upload_purging.age }}
  interval: {{ upload_purging.interval }}
  dryrun: {{ upload_purging.dryrun | lower}}
{% else %}
upload_purging:
  enabled: true
  # remove files in _upload directories which exist for a period of time, default is one week.
  age: 168h
  # the interval of the purge operations
  interval: 24h
  dryrun: false
{% endif %}

# Cache layer related config
{% if cache is defined %}
cache:
  enabled: {{ cache.enabled | lower}}
  expire_hours: {{ cache.expire_hours }}
{% else %}
cache:
  enabled: false
  expire_hours: 24
{% endif %}

# Harbor core configurations
# Uncomment to enable the following harbor core related configuration items.
{% if core is defined %}
core:
  # The provider for updating project quota(usage), there are 2 options, redis or db,
  # by default is implemented by db but you can switch the updation via redis which
  # can improve the performance of high concurrent pushing to the same project,
  # and reduce the database connections spike and occupies.
  # By redis will bring up some delay for quota usage updation for display, so only
  # suggest switch provider to redis if you were ran into the db connections spike aroud
  # the scenario of high concurrent pushing to same project, no improvment for other scenes.
  quota_update_provider: {{ core.quota_update_provider }}
{% else %}
# core:
#   # The provider for updating project quota(usage), there are 2 options, redis or db,
#   # by default is implemented by db but you can switch the updation via redis which
#   # can improve the performance of high concurrent pushing to the same project,
#   # and reduce the database connections spike and occupies.
#   # By redis will bring up some delay for quota usage updation for display, so only
#   # suggest switch provider to redis if you were ran into the db connections spike around
#   # the scenario of high concurrent pushing to same project, no improvement for other scenes.
#   quota_update_provider: redis # Or db
{% endif %}
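
Because every optional section in the template is wrapped in an `is defined` guard, only `hostname` must be present in the parsed config; anything omitted falls back to the defaults in the corresponding else branches. A minimal sketch (not part of this commit) that renders the new template the same way version_2_12_0/__init__.py does, but with a tiny hand-built dict instead of the output of read_conf(); the template directory path is an assumption relative to a Harbor source checkout:

    from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape

    TPL_DIR = "make/photon/prepare/migrations/version_2_12_0"  # assumed checkout-relative path

    tpl = Environment(
        loader=FileSystemLoader(TPL_DIR),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape=select_autoescape(),
    ).get_template("harbor.yml.jinja")

    # hostname is the only value referenced outside an `is defined` guard;
    # http.port shows a guarded block rendering with a supplied value.
    config = {
        "hostname": "harbor.example.com",
        "http": {"port": 80},
    }

    print(tpl.render(**config)[:300])
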