Merge pull request #5742 from reasonerjt/remove-compose-ha

Remove references to docker-compose based HA
Daniel Jiang 2018-08-28 15:09:22 +08:00 committed by GitHub
commit c4eaf25ed1
11 changed files with 3 additions and 840 deletions

View File

@@ -319,14 +319,10 @@ build:
modify_composefile: modify_composefile_notary modify_composefile_clair modify_composefile_chartmuseum
@echo "preparing docker-compose file..."
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@cp $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__redis_version__/$(REDISVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
modify_composefile_notary:
@@ -340,8 +336,6 @@ modify_composefile_clair:
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
@$(SEDCMD) -i 's/__postgresql_version__/$(CLAIRDBVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
@$(SEDCMD) -i 's/__clair_version__/$(CLAIRVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
@cp $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSECLAIRTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSECLAIRFILENAME)
@$(SEDCMD) -i 's/__clair_version__/$(CLAIRVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSECLAIRFILENAME)
modify_composefile_chartmuseum:
@echo "preparing docker-compose chartmuseum file..."
@@ -364,8 +358,6 @@ package_online: modify_composefile
@if [ -n "$(REGISTRYSERVER)" ] ; then \
$(SEDCMD) -i 's/image\: goharbor/image\: $(REGISTRYSERVER)\/$(REGISTRYPROJECTNAME)/' \
$(HARBORPKG)/docker-compose.yml ; \
$(SEDCMD) -i 's/image\: goharbor/image\: $(REGISTRYSERVER)\/$(REGISTRYPROJECTNAME)/' \
$(HARBORPKG)/ha/docker-compose.yml ; \
fi
@cp LICENSE $(HARBORPKG)/LICENSE
@cp open_source_license $(HARBORPKG)/open_source_license
@@ -381,7 +373,6 @@ package_offline: compile version build modify_sourcefiles modify_composefile
@cp LICENSE $(HARBORPKG)/LICENSE
@cp open_source_license $(HARBORPKG)/open_source_license
@cp NOTICE $(HARBORPKG)/NOTICE
@cp $(HARBORPKG)/photon/db/initial-registry.sql $(HARBORPKG)/ha/
@echo "saving harbor docker image"
@$(DOCKERSAVE) $(DOCKERSAVE_PARA) > $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar

View File

@@ -1,363 +0,0 @@
# Harbor High Availability Guide
## Contents
**Notices**
Notices take these forms:
**Note**: A comment with additional information that explains a part of the text.
**Important**: Something you must be aware of before proceeding.
**Tip**: An extra but helpful piece of practical advice.
## Abstract
This guide describes how to install and configure Harbor for high availability. It supplements the [Installation Guides](installation_guide.md) and assumes that you are familiar with the material in those guides.
**Important** This guide was last updated for the Harbor 1.5.0 release. It does not apply to releases earlier than 1.4.0. Use it at your own discretion when planning your Harbor high availability implementation.
This guide is intended as advice only.
## Harbor High Availability Introduction
This document discusses some common methods of implementing highly available systems, with an emphasis on the core Harbor services and other open source services that are closely aligned with Harbor.
You will need to address high availability concerns for any application software that you run in your Harbor environment. The important thing is to make sure that your services are redundant and available. How you achieve that is up to you.
### Stateless service
To make a stateless service highly available, you need to provide redundant instances and load balance them. Harbor services that are stateless include:
- Adminserver
- UI
- Registry
- Logs
- Jobservice
- Clair
- Proxy
### Stateful service
Stateful services are more difficult to manage. Providing additional instances and load balancing does not solve the problem. Harbor services that are stateful include the following:
- Harbor database (MariaDB)
- Clair database (PostgreSQL)
- Notary database (MariaDB)
- Redis
Making these services highly available depends on the high availability implementation of each application.
## High Availability Architecture
Again, this architecture is provided as advice only.
![HA](img/ha/Architecture.png)
As shown in the figure above, the components involved in the architecture are:
**VIP**: [Virtual IP](https://en.wikipedia.org/wiki/Virtual_IP_address). Harbor users access Harbor through this virtual IP address. The VIP is active on only one load balancer node at a time and automatically fails over to the other node if the active load balancer node goes down.
**LoadBalancer 01 and 02**: Together they form a group that avoids a single point of failure at the load balancer layer. [Keepalived](http://www.keepalived.org) is installed on both load balancer nodes. The two Keepalived instances form a VRRP group that provides the VIP and ensures the VIP is present on only one node at a time. The LVS component in Keepalived balances requests across the Harbor servers according to the routing algorithm.
**Harbor server 1..n**: The running Harbor instances. They work in active-active mode, and you can set up as many nodes as your workload requires.
**Harbor DB cluster**: MariaDB is used by Harbor to store user authentication information, image metadata, and so on. Follow its best practices to make it HA protected.
**Clair DB cluster**: PostgreSQL is used by Clair to store the vulnerability data consulted when scanning images. Follow its best practices to make it HA protected.
**Shared Storage**: The shared storage holds the Docker volumes used by Harbor; images pushed by users are actually stored here. It ensures that multiple Harbor instances have a consistent storage backend, and it can be Swift, NFS, S3, Azure, GCS, Ceph, or OSS. Follow its best practices to make it HA protected.
**Redis**: Redis stores UI session data and the registry metadata cache. When one Harbor instance fails, or the load balancer routes a user request to another Harbor instance, any instance can query Redis to retrieve the session information so the end user keeps an uninterrupted session. Follow Redis best practices to make it HA protected.
Note that this architecture does not set up a load balancer per stateless service; instead, the stateless services are grouped together, and communication between them is protected by host-based Docker network isolation. **Note** Because the components communicate with each other through REST APIs, you can define the group granularity to fit your own scenarios.
### Limitation
Currently Harbor does not support Notary in an HA scenario, which means the content trust functionality is not available in this HA setup.
## Setup
Following the setup instructions in this section, you can build a Harbor high availability deployment as shown in the figure below. You can set up more Harbor nodes if needed.
![setup](img/ha/LabInstallation.png)
### Prerequisites
- 1> MariaDB cluster (Harbor-DB, 192.168.1.215; Harbor currently uses MariaDB 10.2.10)
- 2> Shared storage (Swift server, 192.168.1.216)
- 3> Redis cluster (192.168.1.217)
- 4> PostgreSQL (Clair DB, 192.168.1.50)
- 5> 2 VMs for the load balancer cluster
- 6> n VMs for the Harbor stateless services (n >= 2); this example sets up 2 Harbor nodes
- 7> n+1 static IPs (1 for the VIP; the other n are used by the Harbor stateless servers)
**Important** Items 1, 2, 3, and 4 are stateful components of Harbor. Before configuring Harbor HA, we assume these components are already present and themselves HA protected; otherwise, any of them can become a single point of failure.
The shared storage is replaceable: you can choose other shared storage, as long as it is supported by the registry (https://docs.docker.com/registry/storage-drivers).
PostgreSQL is optional; it is only needed if you use the vulnerability scanning function. Harbor currently uses PostgreSQL 9.6.5.
**Tips**
If you are setting up HA only for POC purposes, you can use Docker to run MariaDB, Redis, and PostgreSQL on a single host with the following commands.
```
#> docker run --name redis-server -p 6379:6379 -d redis
#> docker run -d --restart=always -e MYSQL_ROOT_PASSWORD=root123 -v /data/database:/var/lib/mysql:z -p 3306:3306 --name mariadb vmware/mariadb-photon:10.2.10
#> docker run -d -e POSTGRES_PASSWORD="password" -p 5432:5432 postgres:9.6
```
### Load Harbor DB schema
Import the Harbor database schema into your external MariaDB:
#### 1> Log in to a machine that has the MariaDB client installed
#### 2> Save the [Harbor DB Schema](https://github.com/vmware/harbor/blob/release-1.5.0/make/photon/db/registry.sql) to ```registry.sql```
#### 3> Load the schema
```
#> mysql -u your_db_username -p -h your_db_ip < registry.sql
```
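To confirm the import succeeded, you can list the tables it created (this assumes the 1.5.x schema file creates a database named ```registry```):
```
#> mysql -u your_db_username -p -h your_db_ip -e "SHOW TABLES" registry
```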
### Load balancer setup
Because all the Harbor nodes are active, a load balancer is needed to efficiently distribute incoming requests among them. You can use either a hardware or a software load balancer, whichever is more convenient.
Here we use Ubuntu 16.04 + Keepalived to build a software load balancer.
#### On Loadbalancer01:
##### 1> Install the Keepalived and curl packages
curl is used by the Keepalived check script.
```
#> apt-get install keepalived curl
```
##### 2> Configure Keepalived
Save the [Keepalived configuration file](https://github.com/vmware/harbor/blob/release-1.5.0/make/ha/sample/active_active/keepalived_active_active.conf) to ```/etc/keepalived/keepalived.conf```
**Important**
You need to change **<change_to_VIP_address>** to your VIP address (it appears in two places).
Change **harbor_node1_IP** (two places) and **harbor_node2_IP** (two places) to the real Harbor node IPs.
If you have more than two nodes, add more real_server definitions to keepalived.conf (a sample real_server block is shown later in this guide, after the node setup steps).
##### 3> Configure health check
Save the server [health check](https://github.com/vmware/harbor/blob/release-1.5.0/make/ha/sample/active_active/check.sh) script to ```/usr/local/bin/check.sh```
Run the following command to make it executable:
```#> chmod +x /usr/local/bin/check.sh```
##### 4> Enable IP forwarding
Add the following two lines to /etc/sysctl.conf:
```
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
```
Run the following command to apply the change:
```
#> sysctl -p
```
##### 5> Restart the Keepalived service.
```
#>systemctl restart keepalived
```
#### On Loadbalancer02:
Follow the same steps 1 through 5 as for Loadbalancer01; the only change is to set the ```priority``` to 20 in /etc/keepalived/keepalived.conf in step 2. The node with the higher priority number will hold the VIP address.
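For reference, a sketch of the only line that differs on Loadbalancer02 (the sample configuration ships with ```priority 10```):
```
# /etc/keepalived/keepalived.conf on Loadbalancer02 --
# identical to Loadbalancer01 except for this line:
priority 20
```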
#### Harbor node 1 setup
##### 1> Download the Harbor offline package from [GitHub](https://github.com/vmware/harbor/releases) to your home directory
##### 2> Extract harbor-offline-installer-v.x.x.x.tgz; you will get a folder ```harbor``` in the current directory
##### 3> cd into the ```harbor``` directory
##### 4> Configure hostname
In ```harbor.cfg```
```
hostname = reg.mydomain.com
```
Change reg.mydomain.com to your FQDN or VIP (e.g. 192.168.1.220)
##### 5> Provide the Harbor DB connection info
In ```harbor.cfg```
Change the values to match your Harbor DB:
```
#The address of the Harbor database. Only need to change when using external db.
db_host = 192.168.1.215
#The password for the root user of Harbor database. Change this before any production use.
db_password = root123
#The port of Harbor database host
db_port = 3306
#The user name of Harbor database
db_user = root
```
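Before continuing, you can verify that this node can reach the external database, for example:
```
#> mysql -u root -proot123 -h 192.168.1.215 -P 3306 -e "SELECT 1"
```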
##### 6> Provide the Redis server/cluster address
In harbor.cfg
```
#The redis server address
redis_url = 192.168.1.217:6379
```
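If you have the redis-cli client installed, a quick reachability check looks like:
```
#> redis-cli -h 192.168.1.217 -p 6379 ping
```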
##### 7> Provide the Clair DB connection information
In harbor.cfg
```
clair_db_host = 192.168.1.50
clair_db_password = password
clair_db_port = 5432
clair_db_username = postgres
clair_db = postgres
```
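Similarly, assuming the psql client is installed, you can test the Clair DB connection:
```
#> psql -h 192.168.1.50 -p 5432 -U postgres -c "SELECT 1"
```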
##### 8> Provide the storage config information
In harbor.cfg
```
### Docker Registry setting ###
#registry_storage_provider can be: filesystem, s3, gcs, azure, etc.
registry_storage_provider_name = filesystem
#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
registry_storage_provider_config =
```
You can find configuration examples for all kinds of storage at https://docs.docker.com/registry/configuration/#storage .
For example, if you use Swift as the storage backend, you need to set the following:
```
registry_storage_provider_name = swift
registry_storage_provider_config = username: yourusername,password: yourpass,authurl: http://192.168.1.217/identity/v3,tenant: admin,domain: default,region: RegionOne,container: docker_images
```
**Important**
If you set ```filesystem``` as the ```registry_storage_provider_name```, you must make sure the registry directory ```/data/registry``` is mounted on shared storage such as NFS, Ceph, etc. You need to create the /data/registry directory first and change its owner to 10000:10000, as the registry runs with user ID 10000 and group ID 10000.
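As an illustration only, an NFS-based setup might look like the following (the NFS server address and export path are placeholders for your environment):
```
#> mkdir -p /data/registry
#> mount -t nfs <nfs_server_ip>:/exported/registry /data/registry
#> chown 10000:10000 /data/registry
```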
##### 9> (Optional) If you enable HTTPS, you need to prepare the certificate and key and copy them to the ```/data/cert/``` directory (create that folder if it does not exist).
```
#>mkdir -p /data/cert
#>cp server.crt /data/cert/
#>cp server.key /data/cert/
#>mkdir /data/ca_download
#>cp ca.crt /data/ca_download/
```
If you want to keep your own filenames for the certificate, you need to modify the ssl_cert and ssl_cert_key properties in harbor.cfg. If you use a certificate signed by a private CA, you also need to put your CA file at /data/ca_download/ca.crt
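For example, assuming you copied ```mydomain.crt``` and ```mydomain.key``` to /data/cert/, the corresponding harbor.cfg entries would be:
```
ssl_cert = /data/cert/mydomain.crt
ssl_cert_key = /data/cert/mydomain.key
```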
##### 10> Start Harbor on the first node
```
#>./install.sh --ha
```
**Note**
If you want to use the vulnerability scanning functionality, use the following command instead:
```
#>./install.sh --ha --with-clair
```
##### 11> Change iptables
**Important**
Change 192.168.1.220 to your VIP address before issuing the following commands. If you use plain HTTP for Harbor, you do not need to run the second command.
```
#>iptables -t nat -A PREROUTING -p tcp -d 192.168.1.220 --dport 80 -j REDIRECT
#>iptables -t nat -A PREROUTING -p tcp -d 192.168.1.220 --dport 443 -j REDIRECT
```
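You can confirm the rules were added with:
```
#> iptables -t nat -L PREROUTING -n
```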
##### 12> Archive the harbor directory
```
#> tar -cvf harbor_ha.tar ~/harbor
```
##### 13> Copy the harbor_ha.tar to harbor_node2
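For example, using scp (adjust the user and address to your environment):
```
#> scp harbor_ha.tar root@<harbor_node2_ip>:~/
```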
#### Harbor node 2…n setup
##### 1> Place the tar file in your home directory
Move the harbor_ha.tar file to your home directory on harbor_node2
##### 2> Untar the file
```
#> tar -xvf harbor_ha.tar
```
You will get a ```harbor``` folder in your home directory.
##### 3> (Optional) Create certificate folders
This step is only needed when HTTPS is enabled for Harbor.
These folders will be used to store the certificate files.
```
#> mkdir -p /data/cert
#> mkdir -p /data/ca_download
```
##### 4> Start Harbor
```
#> cd harbor
#> ./install.sh --ha
```
**Note**
If you enabled vulnerability scanning, use:
```
#> ./install.sh --ha --with-clair
```
##### 5> Change iptables
**Important**
Change 192.168.1.220 to your VIP address before issuing the following commands. If you use plain HTTP for Harbor, you do not need to run the second command.
```
#>iptables -t nat -A PREROUTING -p tcp -d 192.168.1.220 --dport 80 -j REDIRECT
#>iptables -t nat -A PREROUTING -p tcp -d 192.168.1.220 --dport 443 -j REDIRECT
```
If you want to set up more Harbor nodes, repeat steps 1 to 4. The Keepalived configuration on both load balancer servers will also need to be updated, as shown below.
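For example, adding a third node means appending one more real_server block, mirroring the existing entries, to keepalived.conf on both load balancers:
```
real_server <harbor_node3_ip> 80 {
  weight 10
  MISC_CHECK {
    misc_path "/usr/local/bin/check.sh <harbor_node3_ip>"
    misc_timeout 5
  }
}
```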
Now you can access Harbor at http(s)://VIP
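As a quick smoke test, you can query the systeminfo endpoint through the VIP (the same endpoint the load balancer health check uses); replace the address and protocol with your own:
```
#> curl -k https://192.168.1.220/api/systeminfo
```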
## Known issues
1> https://github.com/vmware/harbor/issues/3919
Workaround:
- For all Harbor servers, mount the /data/job_logs directory to a folder on the NFS server.
- Make sure the folder on the NFS server has read/write permission for UID:GID 10000:10000.
- Restart the jobservice container with ```docker restart harbor-jobservice``` on all Harbor servers.
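A minimal sketch of the workaround, assuming an NFS export dedicated to job logs (run on every Harbor server; the export path is a placeholder):
```
#> mount -t nfs <nfs_server_ip>:/exported/job_logs /data/job_logs
#> chown 10000:10000 /data/job_logs
#> docker restart harbor-jobservice
```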
2> https://github.com/vmware/harbor/issues/4012
In Harbor 1.4 we added support for stopping a running job. In an HA scenario, however, you may not be able to stop a job: the job status is currently stored in memory rather than in persistent storage, so the stop request may not be scheduled to the node that is executing the job. We plan to refactor the jobservice model to solve this limitation in the next release.

View File

@@ -1,38 +0,0 @@
version: 0.1
log:
  level: debug
  fields:
    service: registry
storage:
  cache:
    layerinfo: redis
  $storage_provider_info
  maintenance:
    uploadpurging:
      enabled: false
  delete:
    enabled: true
redis:
  addr: $redis_host:$redis_port
  password: $redis_password
  db: $redis_db_index_reg
http:
  addr: :5000
  secret: placeholder
  debug:
    addr: localhost:5001
auth:
  token:
    issuer: harbor-token-issuer
    realm: $public_url/service/token
    rootcertbundle: /etc/registry/root.crt
    service: harbor-registry
notifications:
  endpoints:
  - name: harbor
    disabled: false
    url: $ui_url/service/notifications
    timeout: 3000ms
    threshold: 5
    backoff: 1s

View File

@@ -1,34 +0,0 @@
version: '2'
services:
  ui:
    networks:
      harbor-clair:
        aliases:
          - harbor-ui
  jobservice:
    networks:
      - harbor-clair
  registry:
    networks:
      - harbor-clair
  clair:
    networks:
      - harbor-clair
    container_name: clair
    image: goharbor/clair-photon:__clair_version__
    restart: always
    cpu_quota: 150000
    depends_on:
      - log
    volumes:
      - ./common/config/clair:/config
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "clair"
    env_file:
      ./common/config/clair/clair_env
networks:
  harbor-clair:
    external: false

View File

@@ -1,121 +0,0 @@
version: '2'
services:
  log:
    image: goharbor/harbor-log:__version__
    container_name: harbor-log
    restart: always
    volumes:
      - /var/log/harbor/:/var/log/docker/:z
      - ./common/config/log/:/etc/logrotate.d/:z
    ports:
      - 127.0.0.1:1514:10514
    networks:
      - harbor
  registry:
    image: goharbor/registry-photon:__reg_version__
    container_name: registry
    restart: always
    volumes:
      - /data/registry:/storage:z
      - ./common/config/registry/:/etc/registry/:z
    networks:
      - harbor
    environment:
      - GODEBUG=netdns=cgo
    command:
      ["serve", "/etc/registry/config.yml"]
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "registry"
  adminserver:
    image: goharbor/harbor-adminserver:__version__
    container_name: harbor-adminserver
    env_file:
      - ./common/config/adminserver/env
    restart: always
    volumes:
      - /data/config/:/etc/adminserver/config/:z
      - /data/secretkey:/etc/adminserver/key:z
      - /data/:/data/:z
    networks:
      - harbor
    depends_on:
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "adminserver"
  ui:
    image: goharbor/harbor-ui:__version__
    container_name: harbor-ui
    env_file:
      - ./common/config/ui/env
    restart: always
    volumes:
      - ./common/config/ui/app.conf:/etc/ui/app.conf:z
      - ./common/config/ui/private_key.pem:/etc/ui/private_key.pem:z
      - ./common/config/ui/certificates/:/etc/ui/certificates/:z
      - /data/secretkey:/etc/ui/key:z
      - /data/ca_download/:/etc/ui/ca/:z
      - /data/psc/:/etc/ui/token/:z
    networks:
      - harbor
    depends_on:
      - log
      - adminserver
      - registry
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "ui"
  jobservice:
    image: goharbor/harbor-jobservice:__version__
    container_name: harbor-jobservice
    env_file:
      - ./common/config/jobservice/env
    restart: always
    volumes:
      - /data/job_logs:/var/log/jobs:z
      - ./common/config/jobservice/config.yml:/etc/jobservice/config.yml:z
      - /data/secretkey:/etc/jobservice/key:z
    networks:
      - harbor
    depends_on:
      - ui
      - adminserver
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "jobservice"
  proxy:
    image: goharbor/nginx-photon:__nginx_version__
    container_name: nginx
    restart: always
    volumes:
      - ./common/config/nginx:/etc/nginx:z
    networks:
      - harbor
    ports:
      - 80:80
      - 443:443
      - 4443:4443
    depends_on:
      - registry
      - ui
      - log
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "proxy"
networks:
  harbor:
    external: false

View File

@@ -1,44 +0,0 @@
#!/bin/bash
set -e
#get protocol
#LOG=/var/log/keepalived_check.log
nodeip=$1
nodeaddress="http://${nodeip}"
http_code=`curl -s -o /dev/null -w "%{http_code}" ${nodeaddress}`
if [ "$http_code" == "200" ] ; then
protocol="http"
elif [ "$http_code" == "301" ] ; then
protocol="https"
else
# echo "`date +"%Y-%m-%d %H:%M:%S"` $1, CHECK_CODE=$http_code" >> $LOG
exit 1
fi
systeminfo=`curl -k -o - -s ${protocol}://${nodeip}/api/systeminfo`
# run grep inside the if-condition so "set -e" does not abort the script here
if ! echo "$systeminfo" | grep -q "registry_url" ; then
exit 1
fi
#TODO need to check Clair, but currently Clair status api is unreachable from LB.
# echo $systeminfo | grep "with_clair" | grep "true"
# if [ $? == 0 ] ; then
# clair is enabled
# do some clair check
# else
# clair is disabled
# fi
#check top api
http_code=`curl -k -s -o /dev/null -w "%{http_code}\n" ${protocol}://${nodeip}/api/repositories/top`
set +e
if [ "$http_code" == "200" ] ; then
exit 0
else
exit 1
fi

View File

@@ -1,87 +0,0 @@
global_defs {
  router_id haborlb
}
vrrp_sync_group VG1 {
  group {
    VI_1
  }
}
#Please change "ens160" to the interface name on your load balancer hosts.
#In some cases it will be eth0, ens16xxx, etc.
vrrp_instance VI_1 {
  interface ens160
  track_interface {
    ens160
  }
  state MASTER
  virtual_router_id 51
  priority 10
  virtual_ipaddress {
    <change_to_VIP_address>/32
  }
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass d0cker
  }
}
########## Section for Harbor using the HTTP protocol ######################
#Please change <change_to_VIP_address>, <harbor_node1_ip>, <harbor_node2_ip> to real IP addresses
virtual_server <change_to_VIP_address> 80 {
  delay_loop 15
  lb_algo rr
  lb_kind DR
  protocol TCP
  nat_mask 255.255.255.0
  persistence_timeout 10
  real_server <harbor_node1_ip> 80 {
    weight 10
    MISC_CHECK {
      misc_path "/usr/local/bin/check.sh <harbor_node1_ip>"
      misc_timeout 5
    }
  }
  real_server <harbor_node2_ip> 80 {
    weight 10
    MISC_CHECK {
      misc_path "/usr/local/bin/check.sh <harbor_node2_ip>"
      misc_timeout 5
    }
  }
}
#########################End of HTTP############################
##########################HTTPS#################################
#Please uncomment the following when Harbor runs under HTTPS
#virtual_server <change_to_VIP_address> 443 {
#  delay_loop 15
#  lb_algo rr
#  lb_kind DR
#  protocol TCP
#  nat_mask 255.255.255.0
#  persistence_timeout 10
#
#  real_server <harbor_node1_ip> 443 {
#    weight 10
#    MISC_CHECK {
#      misc_path "/usr/local/bin/check.sh <harbor_node1_ip>"
#      misc_timeout 5
#    }
#  }
#
#  real_server <harbor_node2_ip> 443 {
#    weight 10
#    MISC_CHECK {
#      misc_path "/usr/local/bin/check.sh <harbor_node2_ip>"
#      misc_timeout 5
#    }
#  }
#}
#########################End of HTTPS Section#################

View File

@@ -1,7 +0,0 @@
#!/bin/bash
http_code=`curl -s -o /dev/null -w "%{http_code}" 127.0.0.1`
if [ "$http_code" == "200" ] || [ "$http_code" == "301" ] ; then
exit 0
else
exit 1
fi

View File

@@ -1,36 +0,0 @@
global_defs {
  router_id haborlb
}
vrrp_script check_harbor {
  script "/usr/local/bin/check_harbor.sh"
  interval 15
  fall 5
  rise 2
}
vrrp_sync_group VG1 {
  group {
    VI_1
  }
}
#Please change "ens160" to the interface name on your load balancer hosts.
vrrp_instance VI_1 {
  interface ens160
  track_interface {
    ens160
  }
  state MASTER
  virtual_router_id 51
  priority 10
  virtual_ipaddress {
    VIP/32
  }
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass d0cker
  }
}

View File

@@ -59,8 +59,6 @@ item=0
with_notary=$false
# clair is not enabled by default
with_clair=$false
# HA mode is not enabled by default
harbor_ha=$false
# chartmuseum is not enabled by default
with_chartmuseum=$false
@@ -73,8 +71,6 @@ while [ $# -gt 0 ]; do
with_notary=true;;
--with-clair)
with_clair=true;;
--ha)
harbor_ha=true;;
--with-chartmuseum)
with_chartmuseum=true;;
*)
@@ -167,7 +163,7 @@ then
sed "s/^hostname = .*/hostname = $host/g" -i ./harbor.cfg
fi
prepare_para=
if [ $with_notary ] && [ ! $harbor_ha ]
if [ $with_notary ]
then
prepare_para="${prepare_para} --with-notary"
fi
@@ -175,10 +171,6 @@ if [ $with_clair ]
then
prepare_para="${prepare_para} --with-clair"
fi
if [ $harbor_ha ]
then
prepare_para="${prepare_para} --ha"
fi
if [ $with_chartmuseum ]
then
prepare_para="${prepare_para} --with-chartmuseum"
@@ -189,7 +181,7 @@ echo ""
h2 "[Step $item]: checking existing instance of Harbor ..."; let item+=1
docker_compose_list='-f docker-compose.yml'
if [ $with_notary ] && [ ! $harbor_ha ]
if [ $with_notary ]
then
docker_compose_list="${docker_compose_list} -f docker-compose.notary.yml"
fi
@@ -210,13 +202,6 @@ fi
echo ""
h2 "[Step $item]: starting Harbor ..."
if [ $harbor_ha ]
then
mv docker-compose.yml docker-compose.yml.bak
cp ha/docker-compose.yml docker-compose.yml
mv docker-compose.clair.yml docker-compose.clair.yml.bak
cp ha/docker-compose.clair.yml docker-compose.clair.yml
fi
docker-compose $docker_compose_list up -d
protocol=http

View File

@@ -23,32 +23,6 @@ DATA_VOL = "/data"
def validate(conf, args):
    if args.ha_mode:
        db_host = rcp.get("configuration", "db_host")
        if db_host == "mysql":
            raise Exception("Error: In HA mode, db_host in harbor.cfg needs to point to an external DB address.")
        registry_storage_provider_name = rcp.get("configuration",
            "registry_storage_provider_name").strip()
        if registry_storage_provider_name == "filesystem" and not args.yes:
            msg = 'Is the Harbor Docker Registry configured to use shared storage (e.g. NFS, Ceph etc.)? [yes/no]:'
            if raw_input(msg).lower() != "yes":
                raise Exception("Error: In HA mode, shared storage configuration for Docker Registry in harbor.cfg is required. Refer to HA installation guide for details.")
        if args.notary_mode:
            raise Exception("Error: HA mode doesn't support Notary currently")
        if args.clair_mode:
            clair_db_host = rcp.get("configuration", "clair_db_host")
            if "postgres" == clair_db_host:
                raise Exception("Error: In HA mode, clair_db_host in harbor.cfg needs to point to an external Postgres DB address.")
        cert_path = rcp.get("configuration", "ssl_cert")
        cert_key_path = rcp.get("configuration", "ssl_cert_key")
        shared_cert_key = os.path.join(base_dir, "ha", os.path.basename(cert_key_path))
        shared_cert_path = os.path.join(base_dir, "ha", os.path.basename(cert_path))
        if os.path.isfile(shared_cert_key):
            shutil.copy2(shared_cert_key, cert_key_path)
        if os.path.isfile(shared_cert_path):
            shutil.copy2(shared_cert_path, cert_path)
    protocol = rcp.get("configuration", "ui_url_protocol")
    if protocol != "https" and args.notary_mode:
        raise Exception("Error: the protocol must be https when Harbor is deployed with Notary")
@@ -96,58 +70,6 @@ def mark_file(path, mode=0o600, uid=10000, gid=10000):
    os.chmod(path, mode)
    os.chown(path, uid, gid)
def prepare_ha(conf, args):
    #files under the ha folder take priority
    protocol = rcp.get("configuration", "ui_url_protocol")
    if protocol == "https":
        #copy nginx certificate
        cert_path = rcp.get("configuration", "ssl_cert")
        cert_key_path = rcp.get("configuration", "ssl_cert_key")
        shared_cert_key = os.path.join(base_dir, "ha", os.path.basename(cert_key_path))
        shared_cert_path = os.path.join(base_dir, "ha", os.path.basename(cert_path))
        if os.path.isfile(shared_cert_key):
            shutil.copy2(shared_cert_key, cert_key_path)
        else:
            if os.path.isfile(cert_key_path):
                shutil.copy2(cert_key_path, shared_cert_key)
        if os.path.isfile(shared_cert_path):
            shutil.copy2(shared_cert_path, cert_path)
        else:
            if os.path.isfile(cert_path):
                shutil.copy2(cert_path, shared_cert_path)
        #check if ca exists
        cert_ca_path = os.path.join(DATA_VOL, 'ca_download', 'ca.crt')
        shared_ca_path = os.path.join(base_dir, "ha", os.path.basename(cert_ca_path))
        if os.path.isfile(shared_ca_path):
            shutil.copy2(shared_ca_path, cert_ca_path)
        else:
            if os.path.isfile(cert_ca_path):
                shutil.copy2(cert_ca_path, shared_ca_path)
    #check root.crt and private_key.pem
    private_key_pem = os.path.join(config_dir, "ui", "private_key.pem")
    root_crt = os.path.join(config_dir, "registry", "root.crt")
    shared_private_key_pem = os.path.join(base_dir, "ha", "private_key.pem")
    shared_root_crt = os.path.join(base_dir, "ha", "root.crt")
    if os.path.isfile(shared_private_key_pem):
        shutil.copy2(shared_private_key_pem, private_key_pem)
    else:
        if os.path.isfile(private_key_pem):
            shutil.copy2(private_key_pem, shared_private_key_pem)
    if os.path.isfile(shared_root_crt):
        shutil.copy2(shared_root_crt, root_crt)
    else:
        if os.path.isfile(root_crt):
            shutil.copy2(root_crt, shared_root_crt)
    #secretkey
    shared_secret_key = os.path.join(base_dir, "ha", "secretkey")
    secretkey_path = rcp.get("configuration", "secretkey_path")
    secret_key = os.path.join(secretkey_path, "secretkey")
    if os.path.isfile(shared_secret_key):
        shutil.copy2(shared_secret_key, secret_key)
    else:
        if os.path.isfile(secret_key):
            shutil.copy2(secret_key, shared_secret_key)
def get_secret_key(path):
secret_key = _get_secret(path, "secretkey")
if len(secret_key) != 16:
@@ -206,8 +128,6 @@ parser = argparse.ArgumentParser()
parser.add_argument('--conf', dest='cfgfile', default=base_dir+'/harbor.cfg',type=str,help="the path of Harbor configuration file")
parser.add_argument('--with-notary', dest='notary_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with notary")
parser.add_argument('--with-clair', dest='clair_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with clair")
parser.add_argument('--ha', dest='ha_mode', default=False, action='store_true', help="the Harbor instance is to be deployed in HA mode")
parser.add_argument('--yes', dest='yes', default=False, action='store_true', help="Answer yes to all questions")
parser.add_argument('--with-chartmuseum', dest='chart_mode', default=False, action='store_true', help="the Harbor instance is to be deployed with chart repository supporting")
args = parser.parse_args()
@@ -470,7 +390,7 @@ render(os.path.join(templates_dir, "ui", "env"),
        chart_cache_driver = chart_cache_driver,
        redis_url_reg = redis_url_reg)
registry_config_file = "config_ha.yml" if args.ha_mode else "config.yml"
registry_config_file = "config.yml"
if storage_provider_name == "filesystem":
    if not storage_provider_config:
        storage_provider_config = "rootdirectory: /storage"
@@ -682,9 +602,6 @@ if args.clair_mode:
        https_proxy = https_proxy,
        no_proxy = no_proxy)
if args.ha_mode:
    prepare_ha(rcp, args)
# config chart repository
if args.chart_mode:
    chartm_temp_dir = os.path.join(templates_dir, "chartserver")