enable migrator to support 1.5.0 migration from mysql to pgsql (#5029)

This commit enables the data migrator to migrate data
from MySQL to PostgreSQL, a required step when upgrading
Harbor across v1.5.0, since the Harbor DB moved to PostgreSQL
in 1.5.0. It supports both Harbor and Notary DB data migration,
split into two dependent steps.

It also fixes issue #4847 by adding the DB migrator build to the make process.
Yan 2018-06-01 14:58:43 +08:00 committed by GitHub
parent a07a0d09f8
commit 6d800cabbd
51 changed files with 1111 additions and 943 deletions
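
For reference, a minimal usage sketch of the new migrator image, based on the commands handled by the rewritten tools/migration/run.sh below (the image tag, host paths, and credentials are placeholders, not part of this commit; the backup mount follows the path used by the previous script):

# back up the existing MySQL-based harbor DB before upgrading
docker run --rm -e DB_USR=root -e DB_PWD=root123 \
    -v /data/database:/var/lib/mysql \
    -v /path/on/host/backup:/harbor-migration/backup \
    vmware/harbor-migrator:v1.5.0 backup

# upgrade the harbor DB; crossing 1.5.0 converts the data from mysql to pgsql in place
docker run --rm -e DB_USR=root -e DB_PWD=root123 \
    -v /data/database:/var/lib/mysql \
    vmware/harbor-migrator:v1.5.0 up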

View File

@ -18,7 +18,6 @@ env:
POSTGRESQL_USR: postgres
POSTGRESQL_PWD: root123
POSTGRESQL_DATABASE: registry
SQLITE_FILE: /tmp/registry.db
ADMINSERVER_URL: http://127.0.0.1:8888
DOCKER_COMPOSE_VERSION: 1.7.1
HARBOR_ADMIN: admin
@ -72,8 +71,7 @@ install:
before_script:
# create tables and load data
# - mysql < ./make/db/registry.sql -uroot --verbose
- sudo sqlite3 /tmp/registry.db < make/photon/db/registry_sqlite.sql
- sudo chmod 777 /tmp/registry.db
# - sudo chmod 777 /tmp/registry.db
script:
- sudo mkdir -p /etc/ui/ca/
@ -84,7 +82,7 @@ script:
- sudo make run_clarity_ut CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.4.1
- cat ./src/ui_ng/npm-ut-test-results
- sudo ./tests/testprepare.sh
- sudo make -f make/photon/Makefile _build_postgresql _build_db _build_registry -e VERSIONTAG=dev -e CLAIRDBVERSION=dev -e REGISTRYVERSION=v2.6.2
- sudo make -f make/photon/Makefile _build_db _build_registry -e VERSIONTAG=dev -e CLAIRDBVERSION=dev -e REGISTRYVERSION=v2.6.2
- sudo sed -i 's/__reg_version__/v2.6.2-dev/g' ./make/docker-compose.test.yml
- sudo sed -i 's/__version__/dev/g' ./make/docker-compose.test.yml
- sudo mkdir -p ./make/common/config/registry/

View File

@ -210,6 +210,7 @@ DOCKERSAVE_PARA=$(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG) \
$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
vmware/redis-photon:$(REDISVERSION) \
vmware/harbor-migrator:$(VERSIONTAG) \
vmware/nginx-photon:$(NGINXVERSION) vmware/registry-photon:$(REGISTRYVERSION)-$(VERSIONTAG) \
vmware/photon:$(PHOTONVERSION)
PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \

View File

@ -11,6 +11,7 @@ BUILDPATH=$(CURDIR)
MAKEPATH=$(BUILDPATH)/make
MAKEDEVPATH=$(MAKEPATH)/dev
SRCPATH=./src
TOOLSPATH=$(CURDIR)/tools
SEDCMD=$(shell which sed)
WGET=$(shell which wget)
@ -50,7 +51,7 @@ DOCKERFILEPATH_LOG=$(DOCKERFILEPATH)/log
DOCKERFILENAME_LOG=Dockerfile
DOCKERIMAGENAME_LOG=vmware/harbor-log
DOCKERFILEPATH_DB=$(DOCKERFILEPATH)/db/postgresql
DOCKERFILEPATH_DB=$(DOCKERFILEPATH)/db
DOCKERFILENAME_DB=Dockerfile
DOCKERIMAGENAME_DB=vmware/harbor-db
@ -70,10 +71,6 @@ DOCKERFILEPATH_REG=$(DOCKERFILEPATH)/registry
DOCKERFILENAME_REG=Dockerfile
DOCKERIMAGENAME_REG=vmware/registry-photon
DOCKERFILEPATH_MARIADB=$(DOCKERFILEPATH)/mariadb
DOCKERFILENAME_MARIADB=Dockerfile
DOCKERIMAGENAME_MARIADB=vmware/mariadb-photon
DOCKERFILEPATH_NOTARY=$(DOCKERFILEPATH)/notary
DOCKERFILENAME_NOTARYSIGNER=signer.Dockerfile
DOCKERIMAGENAME_NOTARYSIGNER=vmware/notary-signer-photon
@ -84,13 +81,14 @@ DOCKERFILEPATH_REDIS=$(DOCKERFILEPATH)/redis
DOCKERFILENAME_REDIS=Dockerfile
DOCKERIMAGENAME_REDIS=vmware/redis-photon
DOCKERFILEPATH_MIGRATOR=$(TOOLSPATH)/migration
DOCKERFILENAME_MIGRATOR=Dockerfile
DOCKERIMAGENAME_MIGRATOR=vmware/harbor-migrator
_build_db:
@echo "modify the db dockerfile..."
@$(SEDCMD) -i 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
@echo "building db container for photon..."
@cd $(DOCKERFILEPATH_DB) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB) -t $(DOCKERIMAGENAME_DB):$(VERSIONTAG) .
@echo "Done."
@$(SEDCMD) -i 's/$(VERSIONTAG)/__postgresql_version__/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
_build_adminiserver:
@echo "building adminserver container for photon..."
@ -112,11 +110,6 @@ _build_log:
$(DOCKERBUILD) -f $(DOCKERFILEPATH_LOG)/$(DOCKERFILENAME_LOG) -t $(DOCKERIMAGENAME_LOG):$(VERSIONTAG) $(DOCKERFILEPATH_LOG)
@echo "Done."
_build_postgresql:
@echo "building postgresql container for photon..."
@cd $(DOCKERFILEPATH_POSTGRESQL) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_POSTGRESQL)/$(DOCKERFILENAME_POSTGRESQL) -t $(DOCKERIMAGENAME_POSTGRESQL):$(CLAIRDBVERSION) .
@echo "Done."
_build_clair:
@if [ "$(CLAIRFLAG)" = "true" ] ; then \
if [ "$(BUILDBIN)" != "true" ] ; then \
@ -165,22 +158,22 @@ _build_registry:
@cd $(DOCKERFILEPATH_REG) && chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(REGISTRYVERSION)-$(VERSIONTAG) .
@rm -rf $(DOCKERFILEPATH_REG)/binary
@echo "Done."
_build_mariadb:
@echo "building mariadb container for photon..."
@cd $(DOCKERFILEPATH_MARIADB) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_MARIADB)/$(DOCKERFILENAME_MARIADB) -t $(DOCKERIMAGENAME_MARIADB):$(MARIADBVERSION) .
@echo "Done."
_build_redis:
@echo "building redis container for photon..."
@cd $(DOCKERFILEPATH_REDIS) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_REDIS)/$(DOCKERFILENAME_REDIS) -t $(DOCKERIMAGENAME_REDIS):$(REDISVERSION) .
@echo "Done."
_build_migrator:
@echo "building db migrator container for photon..."
@cd $(DOCKERFILEPATH_MIGRATOR) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_MIGRATOR)/$(DOCKERFILENAME_MIGRATOR) -t $(DOCKERIMAGENAME_MIGRATOR):$(VERSIONTAG) .
@echo "Done."
define _get_binary
$(WGET) --timeout 30 --no-check-certificate $1 -O $2
endef
build: _build_postgresql _build_db _build_adminiserver _build_ui _build_jobservice _build_log _build_nginx _build_registry _build_notary _build_clair _build_redis
build: _build_db _build_adminiserver _build_ui _build_jobservice _build_log _build_nginx _build_registry _build_notary _build_clair _build_redis _build_migrator
cleanimage:
@echo "cleaning image for photon..."

View File

@ -1,7 +1,7 @@
FROM vmware/photon:1.0
RUN tdnf erase vim -y \
&& tdnf distro-sync -y || echo \
&& tdnf distro-sync -y \
&& tdnf install -y sudo >> /dev/null \
&& tdnf clean all \
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor \

View File

@ -1,8 +1,35 @@
FROM vmware/mariadb-photon:__version__
FROM vmware/photon:1.0
HEALTHCHECK CMD mysqladmin -uroot -p$MYSQL_ROOT_PASSWORD ping
ENV PGDATA /var/lib/postgresql/data
RUN touch /etc/localtime.bak \
&& tdnf distro-sync -y \
&& tdnf install -y sed shadow gzip postgresql >> /dev/null\
&& groupadd -r postgres --gid=999 \
&& useradd -r -g postgres --uid=999 postgres \
&& mkdir -p /docker-entrypoint-initdb.d \
&& mkdir -p /run/postgresql \
&& chown -R postgres:postgres /run/postgresql \
&& chmod 2777 /run/postgresql \
&& mkdir -p "$PGDATA" && chown -R postgres:postgres "$PGDATA" && chmod 777 "$PGDATA" \
&& sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/share/postgresql/postgresql.conf.sample \
&& sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/share/postgresql/postgresql.conf.sample \
&& touch /usr/share/locale/locale.alias \
&& locale-gen.sh en_US.UTF-8 \
&& tdnf clean all
VOLUME /var/lib/postgresql/data
ADD docker-entrypoint.sh /entrypoint.sh
ADD docker-healthcheck.sh /docker-healthcheck.sh
RUN chmod u+x /entrypoint.sh /docker-healthcheck.sh
ENTRYPOINT ["/entrypoint.sh"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"]
COPY registry.sql /docker-entrypoint-initdb.d/
COPY registry-flag.sh /docker-entrypoint-initdb.d/
COPY upgrade.sh /docker-entrypoint-updatedb.d/
COPY initial-notaryserver.sql /docker-entrypoint-initdb.d/
COPY initial-notarysigner.sql /docker-entrypoint-initdb.d/
EXPOSE 5432
CMD ["postgres"]

View File

@ -1,6 +0,0 @@
FROM vmware/postgresql-photon:__postgresql_version__
COPY registry.sql /docker-entrypoint-initdb.d/
COPY initial-notaryserver.sql /docker-entrypoint-initdb.d/
COPY initial-notarysigner.sql /docker-entrypoint-initdb.d/

View File

@ -1,3 +0,0 @@
#!/bin/sh
touch /var/lib/mysql/created_in_mariadb.flag
echo "dumped flag for MariaDB"

View File

@ -1,13 +1,11 @@
drop database if exists registry;
create database registry charset = utf8;
CREATE DATABASE registry ENCODING 'UTF8';
use registry;
\c registry;
create table access (
access_id int NOT NULL AUTO_INCREMENT,
access_id SERIAL PRIMARY KEY NOT NULL,
access_code char(1),
comment varchar (30),
primary key (access_id)
comment varchar (30)
);
insert into access (access_code, comment) values
@ -17,14 +15,13 @@ insert into access (access_code, comment) values
('D', 'Delete access for project'),
('S', 'Search access for project');
create table role (
role_id int NOT NULL AUTO_INCREMENT,
role_id SERIAL PRIMARY KEY NOT NULL,
role_mask int DEFAULT 0 NOT NULL,
role_code varchar(20),
name varchar (20),
primary key (role_id)
name varchar (20)
);
/*
role mask is used for future enhancement when a project member can have multi-roles
currently set to 0
@ -35,46 +32,39 @@ insert into role (role_code, name) values
('RWS', 'developer'),
('RS', 'guest');
create table user (
user_id int NOT NULL AUTO_INCREMENT,
# The max length of username controlled by API is 20,
# and 11 is reserved for marking the deleted users.
# The mark of deleted user is "#user_id".
# The 11 consist of 10 for the max value of user_id(4294967295)
# in MySQL and 1 of '#'.
create table harbor_user (
user_id SERIAL PRIMARY KEY NOT NULL,
username varchar(255),
# 11 bytes is reserved for marking the deleted users.
email varchar(255),
password varchar(40) NOT NULL,
realname varchar (255) NOT NULL,
comment varchar (30),
deleted tinyint (1) DEFAULT 0 NOT NULL,
deleted boolean DEFAULT false NOT NULL,
reset_uuid varchar(40) DEFAULT NULL,
salt varchar(40) DEFAULT NULL,
sysadmin_flag tinyint (1),
creation_time timestamp,
update_time timestamp,
primary key (user_id),
sysadmin_flag boolean DEFAULT false NOT NULL,
creation_time timestamp(0),
update_time timestamp(0),
UNIQUE (username),
UNIQUE (email)
);
insert into user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',0, 1, NOW(), NOW()),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, 0, NOW(), NOW());
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
create table project (
project_id int NOT NULL AUTO_INCREMENT,
project_id SERIAL PRIMARY KEY NOT NULL,
owner_id int NOT NULL,
# The max length of name controlled by API is 30,
# and 11 is reserved for marking the deleted project.
/*
The max length of name controlled by API is 30,
and 11 is reserved for marking the deleted project.
*/
name varchar (255) NOT NULL,
creation_time timestamp,
update_time timestamp,
deleted tinyint (1) DEFAULT 0 NOT NULL,
primary key (project_id),
FOREIGN KEY (owner_id) REFERENCES user(user_id),
deleted boolean DEFAULT false NOT NULL,
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
UNIQUE (name)
);
@ -82,51 +72,67 @@ insert into project (owner_id, name, creation_time, update_time) values
(1, 'library', NOW(), NOW());
create table project_member (
id int not null AUTO_INCREMENT,
id SERIAL NOT NULL,
project_id int NOT NULL,
entity_id int NOT NULL,
entity_type char(1) NOT NULL, ## u for user, g for user group
/*
entity_type indicates the type of member,
u for user, g for user group
*/
entity_type char(1) NOT NULL,
role int NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id),
CONSTRAINT unique_project_entity_type UNIQUE (project_id, entity_id, entity_type)
);
);
CREATE FUNCTION update_update_time_at_column() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
NEW.update_time = NOW();
RETURN NEW;
END;
$$;
CREATE TRIGGER project_member_update_time_at_modtime BEFORE UPDATE ON project_member FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
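/*
Note: the function/trigger pair above stands in for MySQL's per-column
"on update CURRENT_TIMESTAMP" option, which Postgres lacks; each table
that needs it gets its own BEFORE UPDATE trigger calling
update_update_time_at_column().
*/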
insert into project_member (project_id, entity_id, role, entity_type) values
(1, 1, 1, 'u');
create table project_metadata (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
project_id int NOT NULL,
name varchar(255) NOT NULL,
value varchar(255),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
deleted tinyint (1) DEFAULT 0 NOT NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
deleted boolean DEFAULT false NOT NULL,
PRIMARY KEY (id),
CONSTRAINT unique_project_id_and_name UNIQUE (project_id,name),
FOREIGN KEY (project_id) REFERENCES project(project_id)
);
insert into project_metadata (id, project_id, name, value, creation_time, update_time, deleted) values
(1, 1, 'public', 'true', NOW(), NOW(), 0);
CREATE TRIGGER project_metadata_update_time_at_modtime BEFORE UPDATE ON project_metadata FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
insert into project_metadata (project_id, name, value, creation_time, update_time, deleted) values
(1, 'public', 'true', NOW(), NOW(), false);
create table user_group
(
id int NOT NULL AUTO_INCREMENT,
group_name varchar(255) NOT NULL,
group_type int default 0,
ldap_group_dn varchar(512) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
PRIMARY KEY (id)
create table user_group (
id SERIAL NOT NULL,
group_name varchar(255) NOT NULL,
group_type smallint default 0,
ldap_group_dn varchar(512) NOT NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER user_group_update_time_at_modtime BEFORE UPDATE ON user_group FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table access_log (
log_id int NOT NULL AUTO_INCREMENT,
log_id SERIAL NOT NULL,
username varchar (255) NOT NULL,
project_id int NOT NULL,
repo_name varchar (256),
@ -134,42 +140,47 @@ create table access_log (
GUID varchar(64),
operation varchar(20) NOT NULL,
op_time timestamp,
primary key (log_id),
INDEX pid_optime (project_id, op_time)
primary key (log_id)
);
CREATE INDEX pid_optime ON access_log (project_id, op_time);
create table repository (
repository_id int NOT NULL AUTO_INCREMENT,
repository_id SERIAL NOT NULL,
name varchar(255) NOT NULL,
project_id int NOT NULL,
description text,
pull_count int DEFAULT 0 NOT NULL,
star_count int DEFAULT 0 NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
primary key (repository_id),
UNIQUE (name)
);
CREATE TRIGGER repository_update_time_at_modtime BEFORE UPDATE ON repository FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_policy (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
name varchar(256),
project_id int NOT NULL,
target_id int NOT NULL,
enabled tinyint(1) NOT NULL DEFAULT 1,
enabled boolean NOT NULL DEFAULT true,
description text,
deleted tinyint (1) DEFAULT 0 NOT NULL,
deleted boolean DEFAULT false NOT NULL,
cron_str varchar(256),
filters varchar(1024),
replicate_deletion tinyint (1) DEFAULT 0 NOT NULL,
replicate_deletion boolean DEFAULT false NOT NULL,
start_time timestamp NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_policy_update_time_at_modtime BEFORE UPDATE ON replication_policy FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_target (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
name varchar(64),
url varchar(64),
username varchar(255),
@ -179,60 +190,74 @@ create table replication_target (
0 means it's a harbor instance,
1 means it's a regulart registry
*/
target_type tinyint(1) NOT NULL DEFAULT 0,
insecure tinyint(1) NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
target_type SMALLINT NOT NULL DEFAULT 0,
insecure boolean NOT NULL DEFAULT false,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_target_update_time_at_modtime BEFORE UPDATE ON replication_target FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_job (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
status varchar(64) NOT NULL,
policy_id int NOT NULL,
repository varchar(256) NOT NULL,
operation varchar(64) NOT NULL,
tags varchar(16384),
#New job service only records uuid, for compatibility in this table both IDs are stored.
/*
New job service only records uuid, for compatibility in this table both IDs are stored.
*/
job_uuid varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
PRIMARY KEY (id),
INDEX policy (policy_id),
INDEX poid_uptime (policy_id, update_time),
INDEX poid_status (policy_id, status)
);
create table replication_immediate_trigger (
id int NOT NULL AUTO_INCREMENT,
policy_id int NOT NULL,
namespace varchar(256) NOT NULL,
on_push tinyint(1) NOT NULL DEFAULT 0,
on_deletion tinyint(1) NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
create table img_scan_job (
id int NOT NULL AUTO_INCREMENT,
CREATE INDEX policy ON replication_job (policy_id);
CREATE INDEX poid_uptime ON replication_job (policy_id, update_time);
CREATE INDEX poid_status ON replication_job (policy_id, status);
CREATE TRIGGER replication_job_update_time_at_modtime BEFORE UPDATE ON replication_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_immediate_trigger (
id SERIAL NOT NULL,
policy_id int NOT NULL,
namespace varchar(256) NOT NULL,
on_push boolean NOT NULL DEFAULT false,
on_deletion boolean NOT NULL DEFAULT false,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_immediate_trigger_update_time_at_modtime BEFORE UPDATE ON replication_immediate_trigger FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_job (
id SERIAL NOT NULL,
status varchar(64) NOT NULL,
repository varchar(256) NOT NULL,
tag varchar(128) NOT NULL,
digest varchar(128),
#New job service only records uuid, for compatibility in this table both IDs are stored.
/*
New job service only records uuid, for compatibility in this table both IDs are stored.
*/
job_uuid varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
PRIMARY KEY (id),
INDEX idx_status (status),
INDEX idx_digest (digest),
INDEX idx_uuid (job_uuid),
INDEX idx_repository_tag (repository,tag)
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE INDEX idx_status ON img_scan_job (status);
CREATE INDEX idx_digest ON img_scan_job (digest);
CREATE INDEX idx_uuid ON img_scan_job (job_uuid);
CREATE INDEX idx_repository_tag ON img_scan_job (repository,tag);
CREATE TRIGGER img_scan_job_update_time_at_modtime BEFORE UPDATE ON img_scan_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_overview (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
image_digest varchar(128) NOT NULL,
scan_job_id int NOT NULL,
/* 0 indicates none, the higher the number, the more severe the status */
@ -241,14 +266,16 @@ create table img_scan_overview (
components_overview varchar(2048),
/* primary key for querying details, in clair it should be the name of the "top layer" */
details_key varchar(128),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY(id),
UNIQUE(image_digest)
);
CREATE TRIGGER img_scan_overview_update_time_at_modtime BEFORE UPDATE ON img_scan_overview FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table clair_vuln_timestamp (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
namespace varchar(128) NOT NULL,
last_update timestamp NOT NULL,
PRIMARY KEY(id),
@ -256,7 +283,7 @@ UNIQUE(namespace)
);
create table properties (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
k varchar(64) NOT NULL,
v varchar(128) NOT NULL,
PRIMARY KEY(id),
@ -264,43 +291,58 @@ create table properties (
);
create table harbor_label (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
name varchar(128) NOT NULL,
description text,
color varchar(16),
# 's' for system level labels
# 'u' for user level labels
/*
's' for system level labels
'u' for user level labels
*/
level char(1) NOT NULL,
# 'g' for global labels
# 'p' for project labels
/*
'g' for global labels
'p' for project labels
*/
scope char(1) NOT NULL,
project_id int,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY(id),
CONSTRAINT unique_label UNIQUE (name,scope, project_id)
);
CREATE TRIGGER harbor_label_update_time_at_modtime BEFORE UPDATE ON harbor_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table harbor_resource_label (
id int NOT NULL AUTO_INCREMENT,
id SERIAL NOT NULL,
label_id int NOT NULL,
# the resource_id is the ID of project when the resource_type is p
# the resource_id is the ID of repository when the resource_type is r
/*
the resource_id is the ID of project when the resource_type is p
the resource_id is the ID of repository when the resource_type is r
*/
resource_id int,
# the resource_name is the name of image when the resource_type is i
/*
the resource_name is the name of image when the resource_type is i
*/
resource_name varchar(256),
# 'p' for project
# 'r' for repository
# 'i' for image
/*
'p' for project
'r' for repository
'i' for image
*/
resource_type char(1) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY(id),
CONSTRAINT unique_label_resource UNIQUE (label_id,resource_id, resource_name, resource_type)
);
CREATE TABLE IF NOT EXISTS `alembic_version` (
`version_num` varchar(32) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TRIGGER harbor_resource_label_update_time_at_modtime BEFORE UPDATE ON harbor_resource_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
CREATE TABLE IF NOT EXISTS alembic_version (
version_num varchar(32) NOT NULL
);
insert into alembic_version values ('1.5.0');

View File

@ -1,294 +0,0 @@
create table access (
access_id INTEGER PRIMARY KEY,
access_code char(1),
comment varchar (30)
);
insert into access (access_code, comment) values
('M', 'Management access for project'),
('R', 'Read access for project'),
('W', 'Write access for project'),
('D', 'Delete access for project'),
('S', 'Search access for project');
create table role (
role_id INTEGER PRIMARY KEY,
role_mask int DEFAULT 0 NOT NULL,
role_code varchar(20),
name varchar (20)
);
/*
role mask is used for future enhancement when a project member can have multi-roles
currently set to 0
*/
insert into role (role_code, name) values
('MDRWS', 'projectAdmin'),
('RWS', 'developer'),
('RS', 'guest');
create table harbor_user (
user_id INTEGER PRIMARY KEY,
/*
The max length of username controlled by API is 20,
and 11 is reserved for marking the deleted users.
The mark of deleted user is "#user_id".
The 11 consist of 10 for the max value of user_id(4294967295)
in MySQL and 1 of '#'.
*/
username varchar(255),
/*
11 bytes is reserved for marking the deleted users.
*/
email varchar(255),
password varchar(40) NOT NULL,
realname varchar (255) NOT NULL,
comment varchar (30),
deleted tinyint (1) DEFAULT 0 NOT NULL,
reset_uuid varchar(40) DEFAULT NULL,
salt varchar(40) DEFAULT NULL,
sysadmin_flag tinyint (1),
creation_time timestamp,
update_time timestamp,
UNIQUE (username),
UNIQUE (email)
);
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',0, 1, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, 0, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
create table user_group (
id INTEGER PRIMARY KEY,
group_name varchar(255) NOT NULL,
group_type int default 0,
ldap_group_dn varchar(512) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP
);
create table project (
project_id INTEGER PRIMARY KEY,
owner_id int NOT NULL,
/*
The max length of name controlled by API is 30,
and 11 is reserved for marking the deleted project.
*/
name varchar (255) NOT NULL,
creation_time timestamp,
update_time timestamp,
deleted tinyint (1) DEFAULT 0 NOT NULL,
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
UNIQUE (name)
);
insert into project (owner_id, name, creation_time, update_time) values
(1, 'library', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
create table project_member (
id INTEGER PRIMARY KEY,
project_id int NOT NULL,
entity_id int NOT NULL,
entity_type char NOT NULL,
role int NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE (project_id, entity_id, entity_type)
);
insert into project_member (project_id, entity_id, role, entity_type) values
(1, 1, 1, 'u');
create table project_metadata (
id INTEGER PRIMARY KEY,
project_id int NOT NULL,
name varchar(255) NOT NULL,
value varchar(255),
creation_time timestamp,
update_time timestamp,
deleted tinyint (1) DEFAULT 0 NOT NULL,
UNIQUE(project_id, name),
FOREIGN KEY (project_id) REFERENCES project(project_id)
);
insert into project_metadata (id, project_id, name, value, creation_time, update_time, deleted) values
(1, 1, 'public', 'true', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, 0);
create table access_log (
log_id INTEGER PRIMARY KEY,
username varchar (255) NOT NULL,
project_id int NOT NULL,
repo_name varchar (256),
repo_tag varchar (128),
GUID varchar(64),
operation varchar(20) NOT NULL,
op_time timestamp
);
CREATE INDEX pid_optime ON access_log (project_id, op_time);
create table repository (
repository_id INTEGER PRIMARY KEY,
name varchar(255) NOT NULL,
project_id int NOT NULL,
description text,
pull_count int DEFAULT 0 NOT NULL,
star_count int DEFAULT 0 NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE (name)
);
create table replication_policy (
id INTEGER PRIMARY KEY,
name varchar(256),
project_id int NOT NULL,
target_id int NOT NULL,
enabled tinyint(1) NOT NULL DEFAULT 1,
description text,
deleted tinyint (1) DEFAULT 0 NOT NULL,
cron_str varchar(256),
filters varchar(1024),
replicate_deletion tinyint (1) DEFAULT 0 NOT NULL,
start_time timestamp NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP
);
create table replication_target (
id INTEGER PRIMARY KEY,
name varchar(64),
url varchar(64),
username varchar(255),
password varchar(128),
/*
target_type indicates the type of target registry,
0 means it's a harbor instance,
1 means it's a regulart registry
*/
target_type tinyint(1) NOT NULL DEFAULT 0,
insecure tinyint(1) NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP
);
create table replication_job (
id INTEGER PRIMARY KEY,
status varchar(64) NOT NULL,
policy_id int NOT NULL,
repository varchar(256) NOT NULL,
operation varchar(64) NOT NULL,
tags varchar(16384),
job_uuid varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP
);
create table replication_immediate_trigger (
id INTEGER PRIMARY KEY,
policy_id int NOT NULL,
namespace varchar(256) NOT NULL,
on_push tinyint(1) NOT NULL DEFAULT 0,
on_deletion tinyint(1) NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP
);
create table img_scan_job (
id INTEGER PRIMARY KEY,
status varchar(64) NOT NULL,
repository varchar(256) NOT NULL,
tag varchar(128) NOT NULL,
digest varchar(64),
job_uuid varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP
);
create table img_scan_overview (
id INTEGER PRIMARY KEY,
image_digest varchar(128),
scan_job_id int NOT NULL,
/* 0 indicates none, the higher the number, the more severe the status */
severity int NOT NULL default 0,
/* the json string to store components severity status, currently use a json to be more flexible and avoid creating additional tables. */
components_overview varchar(2048),
/* primary key for querying details, in clair it should be the name of the "top layer" */
details_key varchar(128),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE(image_digest)
);
CREATE INDEX policy ON replication_job (policy_id);
CREATE INDEX poid_uptime ON replication_job (policy_id, update_time);
create table clair_vuln_timestamp (
id INTEGER PRIMARY KEY,
namespace varchar(128) NOT NULL,
last_update timestamp NOT NULL,
UNIQUE(namespace)
);
create table properties (
id INTEGER PRIMARY KEY,
k varchar(64) NOT NULL,
v varchar(128) NOT NULL,
UNIQUE(k)
);
create table harbor_label (
id INTEGER PRIMARY KEY,
name varchar(128) NOT NULL,
description text,
color varchar(16),
/*
's' for system level labels
'u' for user level labels
*/
level char(1) NOT NULL,
/*
'g' for global labels
'p' for project labels
*/
scope char(1) NOT NULL,
project_id int,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE(name, scope, project_id)
);
create table harbor_resource_label (
id INTEGER PRIMARY KEY,
label_id int NOT NULL,
/*
the resource_id is the ID of project when the resource_type is p
the resource_id is the ID of repository when the resource_type is r
*/
resource_id int,
/*
the resource_name is the name of image when the resource_type is i
*/
resource_name varchar(256),
/*
'p' for project
'r' for repository
'i' for image
*/
resource_type char(1) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE (label_id,resource_id,resource_name,resource_type)
);
create table alembic_version (
version_num varchar(32) NOT NULL
);
insert into alembic_version values ('1.5.0');

View File

@ -1,28 +0,0 @@
#!/bin/bash
set +e
if [ ! -f /var/lib/mysql/created_in_mariadb.flag ]; then
echo "Maria DB flag not found, the DB was created in mysql image, running upgrade..."
mysqld >/dev/null 2>&1 &
pid="$!"
for i in {30..0}; do
mysqladmin -uroot -p$MYSQL_ROOT_PASSWORD processlist >/dev/null 2>&1
if [ $? = 0 ]; then
break
fi
echo 'Waiting for MySQL start...'
sleep 1
done
if [ "$i" = 0 ]; then
echo >&2 'MySQL failed to start.'
exit 1
fi
set -e
mysql_upgrade -p$MYSQL_ROOT_PASSWORD
echo 'Finished upgrading'
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'Failed to stop MySQL for upgrading.'
exit 1
fi
else
echo "DB was created in Maria DB, skip upgrade."
fi

View File

@ -1,7 +1,7 @@
FROM vmware/photon:1.0
RUN mkdir /harbor/ \
&& tdnf distro-sync -y || echo \
&& tdnf distro-sync -y \
&& tdnf install sudo -y >> /dev/null\
&& tdnf clean all \
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor

View File

@ -1,25 +0,0 @@
FROM vmware/photon:1.0
#The Docker Daemon has to be running with storage backend btrfs when building the image
RUN tdnf distro-sync -y \
&& tdnf install -y sed shadow procps-ng gawk gzip sudo net-tools >> /dev/null\
&& groupadd -r -g 10000 mysql && useradd --no-log-init -r -g 10000 -u 10000 mysql \
&& tdnf install -y mariadb-server mariadb >> /dev/null\
&& mkdir /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& rm -fr /var/lib/mysql \
&& mkdir -p /var/lib/mysql /var/run/mysqld \
&& chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \
&& chmod 777 /var/run/mysqld /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& tdnf clean all
COPY docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
COPY my.cnf /etc/
RUN ln -s usr/local/bin/docker-entrypoint.sh /
VOLUME /var/lib/mysql /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d /tmp /var/run/mysqld
EXPOSE 3306
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["mysqld"]

View File

@ -1,203 +0,0 @@
#!/bin/bash
set -eo pipefail
shopt -s nullglob
# if command starts with an option, prepend mysqld
if [ "${1:0:1}" = '-' ]; then
set -- mysqld "$@"
fi
# skip setup if they want an option that stops mysqld
wantHelp=
for arg; do
case "$arg" in
-'?'|--help|--print-defaults|-V|--version)
wantHelp=1
break
;;
esac
done
# usage: file_env VAR [DEFAULT]
# ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}
_check_config() {
toRun=( "$@" --verbose --help --log-bin-index="$(mktemp -u)" )
if ! errors="$("${toRun[@]}" 2>&1 >/dev/null)"; then
cat >&2 <<-EOM
ERROR: mysqld failed while attempting to check config
command was: "${toRun[*]}"
$errors
EOM
exit 1
fi
}
# Fetch value from server config
# We use mysqld --verbose --help instead of my_print_defaults because the
# latter only show values present in config files, and not server defaults
_get_config() {
local conf="$1"; shift
"$@" --verbose --help --log-bin-index="$(mktemp -u)" 2>/dev/null | awk '$1 == "'"$conf"'" { print $2; exit }'
}
# allow the container to be started with `--user`
if [ "$1" = 'mysqld' -a -z "$wantHelp" -a "$(id -u)" = '0' ]; then
_check_config "$@"
DATADIR="$(_get_config 'datadir' "$@")"
mkdir -p "$DATADIR"
chown -R mysql:mysql "$DATADIR"
if [ -d '/docker-entrypoint-initdb.d' ]; then
chmod -R +rx /docker-entrypoint-initdb.d
fi
if [ -d '/docker-entrypoint-updatedb.d' ]; then
chmod -R +rx /docker-entrypoint-updatedb.d
fi
exec sudo -u mysql -E "$BASH_SOURCE" "$@"
fi
if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then
# still need to check config, container may have started with --user
_check_config "$@"
# Get config
DATADIR="$(_get_config 'datadir' "$@")"
if [ ! -d "$DATADIR/mysql" ]; then
file_env 'MYSQL_ROOT_PASSWORD'
if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
echo >&2 'error: database is uninitialized and password option is not specified '
echo >&2 ' You need to specify one of MYSQL_ROOT_PASSWORD, MYSQL_ALLOW_EMPTY_PASSWORD'
exit 1
fi
mkdir -p "$DATADIR"
echo 'Initializing database'
cd /usr
mysql_install_db --datadir="$DATADIR" --rpm
cd -
echo 'Database initialized'
SOCKET="$(_get_config 'socket' "$@")"
"$@" --skip-networking --socket="${SOCKET}" &
pid="$!"
mysql=( mysql --protocol=socket -uroot -hlocalhost --socket="${SOCKET}" )
for i in {30..0}; do
if echo 'SELECT 1' | "${mysql[@]}" &> /dev/null; then
break
fi
echo 'MySQL init process in progress...'
sleep 1
done
if [ "$i" = 0 ]; then
echo >&2 'MySQL init process failed.'
exit 1
fi
if [ -z "$MYSQL_INITDB_SKIP_TZINFO" ]; then
# sed is for https://bugs.mysql.com/bug.php?id=20545
mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql
fi
rootCreate=
# default root to listen for connections from anywhere
file_env 'MYSQL_ROOT_HOST' '%'
if [ ! -z "$MYSQL_ROOT_HOST" -a "$MYSQL_ROOT_HOST" != 'localhost' ]; then
# no, we don't care if read finds a terminating character in this heredoc
# https://unix.stackexchange.com/questions/265149/why-is-set-o-errexit-breaking-this-read-heredoc-expression/265151#265151
read -r -d '' rootCreate <<-EOSQL || true
CREATE USER 'root'@'${MYSQL_ROOT_HOST}' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
GRANT ALL ON *.* TO 'root'@'${MYSQL_ROOT_HOST}' WITH GRANT OPTION ;
EOSQL
fi
"${mysql[@]}" <<-EOSQL
-- What's done in this file shouldn't be replicated
-- or products like mysql-fabric won't work
SET @@SESSION.SQL_LOG_BIN=0;
DELETE FROM mysql.user WHERE user NOT IN ('mysql.sys', 'mysqlxsys', 'root') OR host NOT IN ('localhost') ;
SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}') ;
GRANT ALL ON *.* TO 'root'@'localhost' WITH GRANT OPTION ;
${rootCreate}
DROP DATABASE IF EXISTS test ;
FLUSH PRIVILEGES ;
EOSQL
if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then
mysql+=( -p"${MYSQL_ROOT_PASSWORD}" )
fi
file_env 'MYSQL_DATABASE'
if [ "$MYSQL_DATABASE" ]; then
echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" | "${mysql[@]}"
mysql+=( "$MYSQL_DATABASE" )
fi
file_env 'MYSQL_USER'
file_env 'MYSQL_PASSWORD'
if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" | "${mysql[@]}"
if [ "$MYSQL_DATABASE" ]; then
echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" | "${mysql[@]}"
fi
echo 'FLUSH PRIVILEGES ;' | "${mysql[@]}"
fi
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${mysql[@]}" < "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${mysql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'MySQL init process failed.'
exit 1
fi
echo
echo 'MySQL init process done. Ready for start up.'
echo
fi
for f in /docker-entrypoint-updatedb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
### Not supported for now... until needed
# *.sql) echo "$0: running $f"; "${mysql[@]}" < "$f"; echo ;;
# *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${mysql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
fi
exec "$@"

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf erase vim -y \
&& tdnf install -y shadow sudo \
&& tdnf clean all \

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf erase vim -y \
&& tdnf install -y shadow sudo \
&& tdnf clean all \

View File

@ -1,30 +0,0 @@
FROM vmware/photon:1.0
ENV PGDATA /var/lib/postgresql/data
RUN touch /etc/localtime.bak \
&& tdnf distro-sync -y \
&& tdnf install -y sed shadow gzip postgresql >> /dev/null\
&& groupadd -r postgres --gid=999 \
&& useradd -r -g postgres --uid=999 postgres \
&& mkdir -p /docker-entrypoint-initdb.d \
&& mkdir -p /run/postgresql \
&& chown -R postgres:postgres /run/postgresql \
&& chmod 2777 /run/postgresql \
&& mkdir -p "$PGDATA" && chown -R postgres:postgres "$PGDATA" && chmod 777 "$PGDATA" \
&& sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/share/postgresql/postgresql.conf.sample \
&& sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/share/postgresql/postgresql.conf.sample \
&& touch /usr/share/locale/locale.alias \
&& locale-gen.sh en_US.UTF-8 \
&& tdnf clean all
VOLUME /var/lib/postgresql/data
ADD docker-entrypoint.sh /entrypoint.sh
ADD docker-healthcheck.sh /docker-healthcheck.sh
RUN chmod u+x /entrypoint.sh /docker-healthcheck.sh
ENTRYPOINT ["/entrypoint.sh"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"]
EXPOSE 5432
CMD ["postgres"]

View File

@ -1,8 +0,0 @@
#!/bin/sh
psql -h "localhost" -U "postgres" -c 'select 1'
ret_code=$?
if [ $ret_code != 0 ]; then
exit 1
fi

View File

@ -215,6 +215,7 @@ var (
common.TokenServiceURL: "TOKEN_SERVICE_URL",
common.ClairURL: "CLAIR_URL",
common.NotaryURL: "NOTARY_URL",
common.DatabaseType: "DATABASE_TYPE",
}
)

View File

@ -1,11 +1,36 @@
FROM vmware/mariadb-photon:10.2.8
FROM vmware/photon:1.0
RUN tdnf distro-sync -y \
&& tdnf install -y mariadb-devel python2 python2-devel python-pip gcc \
linux-api-headers glibc-devel binutils zlib-devel openssl-devel \
ENV PGDATA /var/lib/postgresql/data
## have both mysql and pgsql installed.
RUN tdnf distro-sync -y || echo \
&& tdnf install -y sed shadow procps-ng gawk gzip sudo net-tools >> /dev/null\
&& groupadd -r -g 10000 mysql && useradd --no-log-init -r -g 10000 -u 10000 mysql \
&& tdnf install -y mariadb-server mariadb mariadb-devel python2 python2-devel python-pip gcc \
linux-api-headers glibc-devel binutils zlib-devel openssl-devel postgresql >> /dev/null\
&& pip install mysqlclient alembic \
&& tdnf clean all \
&& mkdir -p /harbor-migration
&& mkdir /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& rm -fr /var/lib/mysql \
&& mkdir -p /var/lib/mysql /var/run/mysqld \
&& chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \
&& chmod 777 /var/run/mysqld /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& mkdir -p /harbor-migration \
&& touch /etc/localtime.bak \
&& groupadd -r postgres --gid=999 \
&& useradd -r -g postgres --uid=999 postgres \
&& mkdir -p /run/postgresql \
&& chown -R postgres:postgres /run/postgresql \
&& chmod 2777 /run/postgresql \
&& mkdir -p "$PGDATA" && chown -R postgres:postgres "$PGDATA" && chmod 777 "$PGDATA" \
&& sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/share/postgresql/postgresql.conf.sample \
&& sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/share/postgresql/postgresql.conf.sample \
&& touch /usr/share/locale/locale.alias \
&& locale-gen.sh en_US.UTF-8 \
&& tdnf clean all
COPY ./db/my.cnf /etc/
VOLUME /var/lib/postgresql/data
WORKDIR /harbor-migration

View File

@ -1,14 +0,0 @@
FROM vmware/mariadb-photon:10.2.8
RUN tdnf distro-sync -y \
&& tdnf install -y mariadb-devel python2 python2-devel python-pip gcc \
linux-api-headers glibc-devel binutils zlib-devel openssl-devel \
&& pip install mysqlclient alembic \
&& tdnf clean all \
&& mkdir -p /harbor-migration
WORKDIR /harbor-migration
COPY ./ ./
ENTRYPOINT ["./run.sh"]

View File

@ -3,7 +3,7 @@ echo "
[alembic]
# path to migration scripts
script_location = /harbor-migration/db/migration_harbor
script_location = /harbor-migration/db/alembic/migration_harbor
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

View File

@ -1,125 +1,241 @@
#!/bin/bash
# Copyright 2017 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
source $PWD/db/util/mysql.sh
source $PWD/db/util/pgsql.sh
source $PWD/db/util/mysql_pgsql_1_5_0.sh
source $PWD/db/util/alembic.sh
export PYTHONPATH=$PYTHONPATH:/harbor-migration/db
if [ -z "$DB_USR" -o -z "$DB_PWD" ]; then
echo "DB_USR or DB_PWD not set, exiting..."
exit 1
fi
set -e
source /harbor-migration/db/alembic.tpl > /harbor-migration/db/alembic.ini
ISMYSQL=false
ISPGSQL=false
ISNOTARY=false
DBCNF="-hlocalhost -u${DB_USR}"
cur_version=""
PGSQL_USR="postgres"
#prevent shell to print insecure message
export MYSQL_PWD="${DB_PWD}"
if [[ $1 = "help" || $1 = "h" || $# = 0 ]]; then
echo "Usage:"
echo "backup perform database backup"
echo "restore perform database restore"
echo "up, upgrade perform database schema upgrade"
echo "test test database connection"
echo "h, help usage help"
exit 0
fi
# if [[ ( $1 = "up" || $1 = "upgrade" ) && ${SKIP_CONFIRM} != "y" ]]; then
# echo "Please backup before upgrade."
# read -p "Enter y to continue updating or n to abort:" ans
# case $ans in
# [Yy]* )
# ;;
# [Nn]* )
# exit 0
# ;;
# * ) echo "illegal answer: $ans. Upgrade abort!!"
# exit 1
# ;;
# esac
# fi
echo 'Trying to start mysql server...'
chown -R 10000:10000 /var/lib/mysql
mysqld &
echo 'Waiting for MySQL start...'
for i in {60..0}; do
mysqladmin -u$DB_USR -p$DB_PWD processlist >/dev/null 2>&1
if [ $? = 0 ]; then
break
fi
sleep 1
done
if [ "$i" = 0 ]; then
echo "timeout. Can't run mysql server."
if [[ $1 = "test" ]]; then
echo "DB test failed."
fi
exit 1
fi
if [[ $1 = "test" ]]; then
echo "DB test passed."
exit 0
fi
key="$1"
case $key in
up|upgrade)
VERSION="$2"
if [[ -z $VERSION ]]; then
VERSION="head"
echo "Version is not specified. Default version is head."
fi
echo "Performing upgrade ${VERSION}..."
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='alembic_version';") -eq 0 ]]; then
echo "table alembic_version does not exist. Trying to initial alembic_version."
mysql $DBCNF < ./alembic.sql
#compatible with version 0.1.0 and 0.1.1
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='properties'") -eq 0 ]]; then
echo "table properties does not exist. The version of registry is 0.1.0"
else
echo "The version of registry is 0.1.1"
mysql $DBCNF -e "insert into registry.alembic_version values ('0.1.1')"
function init {
if [ "$(ls -A /var/lib/mysql)" ]; then
# As the data is migrated to pgsql after the first successful run,
# PG_VERSION should be in /var/lib/mysql if the user repeats the UP command.
if [ -e '/var/lib/mysql/PG_VERSION' ]; then
ISPGSQL=true
elif [ -d '/var/lib/mysql/mysql' ]; then
ISMYSQL=true
if [ -d '/var/lib/mysql/notaryserver' ]; then
ISNOTARY=true
fi
fi
fi
alembic -c /harbor-migration/db/alembic.ini current
alembic -c /harbor-migration/db/alembic.ini upgrade ${VERSION}
rc="$?"
alembic -c /harbor-migration/db/alembic.ini current
echo "Upgrade performed."
exit $rc
;;
backup)
if [ "$(ls -A /var/lib/postgresql/data)" ]; then
ISPGSQL=true
fi
if [ $ISMYSQL == false ] && [ $ISPGSQL == false ]; then
echo "No database has been mounted for the migration. Use '-v' to set it in 'docker run'."
exit 1
fi
if [ $ISMYSQL == true ]; then
# for notary UP, the user does not need to provide a username and password;
# the check below applies to the harbor DB only.
if [ $ISNOTARY == false ]; then
if [ -z "$DB_USR" -o -z "$DB_PWD" ]; then
echo "DB_USR or DB_PWD not set, exiting..."
exit 1
fi
launch_mysql $DB_USR $DB_PWD
else
launch_mysql root
fi
fi
if [ $ISPGSQL == true ]; then
launch_pgsql $PGSQL_USR
fi
}
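# Detection recap for the checks above: PG_VERSION in the mounted volume means
# the data was already migrated to pgsql; a mysql/ subdirectory means MariaDB/MySQL
# data; a notaryserver/ subdirectory marks the volume as holding the notary DBs,
# which are upgraded without DB_USR/DB_PWD.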
function get_version {
if [ $ISMYSQL == true ]; then
result=$(get_version_mysql)
fi
if [ $ISPGSQL == true ]; then
result=$(get_version_pgsql $PGSQL_USR)
fi
cur_version=$result
}
# returns success when the first version is less than or equal to the second.
function version_le {
## if no version is specified, treat it as larger than 1.5.0
if [ $1 = "head" ];then
return 1
fi
test "$(printf '%s\n' "$@" | sort -V | head -n 1)" = "$1";
}
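# version_le leans on 'sort -V' (version sort): for example, version_le 0.4.0 1.5.0
# and version_le 1.5.0 1.5.0 both succeed, while version_le head 1.5.0 fails
# because "head" is treated as newer than any released version.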
function backup {
echo "Performing backup..."
mysqldump $DBCNF --add-drop-database --databases registry > /harbor-migration/backup/registry.sql
if [ $ISMYSQL == true ]; then
backup_mysql
fi
if [ $ISPGSQL == true ]; then
backup_pgsql
fi
rc="$?"
echo "Backup performed."
exit $rc
;;
export)
echo "Performing export..."
/harbor-migration/db/export --dbuser ${DB_USR} --dbpwd ${DB_PWD} --exportpath ${EXPORTPATH}
rc="$?"
echo "Export performed."
exit $rc
;;
mapprojects)
echo "Performing map projects..."
/harbor-migration/db/mapprojects --dbuser ${DB_USR} --dbpwd ${DB_PWD} --mapprojectsfile ${MAPPROJECTFILE}
rc="$?"
echo "Map projects performed."
exit $rc
;;
restore)
}
function restore {
echo "Performing restore..."
mysql $DBCNF < /harbor-migration/backup/registry.sql
if [ $ISMYSQL == true ]; then
restore_mysql
fi
if [ $ISPGSQL == true ]; then
restore_pgsql
fi
rc="$?"
echo "Restore performed."
exit $rc
;;
*)
echo "unknown option"
exit 0
;;
esac
}
function validate {
echo "Performing test..."
if [ $ISMYSQL == true ]; then
test_mysql $DB_USR $DB_PWD
fi
if [ $ISPGSQL == true ]; then
test_pgsql $PGSQL_USR
fi
rc="$?"
echo "Test performed."
exit $rc
}
function upgrade {
if [ $ISNOTARY == true ];then
up_notary $PGSQL_USR
else
up_harbor $1
fi
}
function up_harbor {
local target_version="$1"
if [ -z $target_version ]; then
target_version="head"
echo "Version is not specified. Default version is head."
fi
get_version
if [ "$cur_version" = "$target_version" ]; then
echo "It has always running the $target_version, no longer need to upgrade."
exit 0
fi
# $cur_version <='1.5.0', $target_version <='1.5.0', it needs to call mysql upgrade.
if version_le $cur_version '1.5.0' && version_le $target_version '1.5.0'; then
if [ $ISMYSQL != true ]; then
echo "Please mount the database volume to /var/lib/mysql, then to run the upgrade again."
return 1
else
alembic_up mysql $target_version
return $?
fi
fi
# $cur_version > '1.5.0', $target_version > '1.5.0', it needs to call pgsql upgrade.
if ! version_le $cur_version '1.5.0' && ! version_le $target_version '1.5.0'; then
if [ $ISPGSQL != true ]; then
echo "Please mount the database volume to /var/lib/postgresql/data, then to run the upgrade again."
return 1
else
alembic_up pgsql $target_version
return $?
fi
fi
# $cur_version <='1.5.0', $target_version >'1.5.0', it needs to upgrade to $cur_version.mysql => 1.5.0.mysql => 1.5.0.pgsql => target_version.pgsql.
if version_le $cur_version '1.5.0' && ! version_le $target_version '1.5.0'; then
if [ $ISMYSQL != true ]; then
echo "Please make sure to mount the correct the data volume."
return 1
else
launch_pgsql $PGSQL_USR
mysql_2_pgsql_1_5_0 $PGSQL_USR
# Pgsql won't run the init scripts because the migration script has already created PG_VERSION,
# the flag used by the pgsql entrypoint.sh to decide whether to run the init scripts that create the harbor DBs.
# Force-init the notary DBs here to align with the new harbor launch process;
# otherwise the user could hit a DB failure when launching harbor with notary, as no notary data was created.
psql -U $PGSQL_USR -f /harbor-migration/db/schema/notaryserver_init.pgsql
psql -U $PGSQL_USR -f /harbor-migration/db/schema/notarysigner_init.pgsql
## this should call alembic_up to the target version; disabled for now as it is unsupported.
#alembic_up $target_version
stop_pgsql $PGSQL_USR
stop_mysql $DB_USR $DB_PWD
rm -rf /var/lib/mysql/*
cp -rf $PGDATA/* /var/lib/mysql
return 0
fi
fi
echo "Unsupported DB upgrade from $cur_version to $target_version, please check the inputs."
return 1
}
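# Upgrade decision recap: both current and target <= 1.5.0 -> run alembic against mysql;
# both > 1.5.0 -> run alembic against pgsql; crossing 1.5.0 -> convert the mysql data
# with mysql_2_pgsql_1_5_0, init the notary DBs, and replace the mounted volume's
# contents with the new pgsql data directory.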
function main {
if [[ $1 = "help" || $1 = "h" || $# = 0 ]]; then
echo "Usage:"
echo "backup perform database backup"
echo "restore perform database restore"
echo "up, upgrade perform database schema upgrade"
echo "test test database connection"
echo "h, help usage help"
exit 0
fi
init
local key="$1"
case $key in
up|upgrade)
upgrade $2
;;
backup)
backup
;;
restore)
restore
;;
test)
validate
;;
*)
echo "unknown option"
exit 0
;;
esac
}
main "$@"

View File

@ -0,0 +1,39 @@
\c notaryserver;
CREATE TABLE "tuf_files" (
"id" int PRIMARY KEY,
"created_at" timestamp NULL DEFAULT NULL,
"updated_at" timestamp NULL DEFAULT NULL,
"deleted_at" timestamp NULL DEFAULT NULL,
"gun" varchar(255) NOT NULL,
"role" varchar(255) NOT NULL,
"version" integer NOT NULL,
"data" bytea NOT NULL,
"sha256" char(64) DEFAULT NULL,
UNIQUE ("gun","role","version")
);
CREATE INDEX tuf_files_sha256_idx ON tuf_files(sha256);
CREATE TABLE "change_category" (
"category" VARCHAR(20) PRIMARY KEY
);
CREATE TABLE "changefeed" (
"id" serial PRIMARY KEY,
"created_at" timestamp DEFAULT CURRENT_TIMESTAMP,
"gun" varchar(255) NOT NULL,
"version" integer NOT NULL,
"sha256" CHAR(64) DEFAULT NULL,
"category" VARCHAR(20) NOT NULL DEFAULT 'update' REFERENCES "change_category"
);
CREATE INDEX "idx_changefeed_gun" ON "changefeed" ("gun");
CREATE TABLE "schema_migrations" (
"version" int PRIMARY KEY
);
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO server;

View File

@ -0,0 +1,4 @@
CREATE DATABASE notaryserver;
CREATE USER server;
alter user server with encrypted password 'password';
GRANT ALL PRIVILEGES ON DATABASE notaryserver TO server;

View File

@ -0,0 +1,27 @@
\c notarysigner;
CREATE TABLE "private_keys" (
"id" int PRIMARY KEY,
"created_at" timestamp NULL DEFAULT NULL,
"updated_at" timestamp NULL DEFAULT NULL,
"deleted_at" timestamp NULL DEFAULT NULL,
"key_id" varchar(255) NOT NULL,
"encryption_alg" varchar(255) NOT NULL,
"keywrap_alg" varchar(255) NOT NULL,
"algorithm" varchar(50) NOT NULL,
"passphrase_alias" varchar(50) NOT NULL,
"public" bytea NOT NULL,
"private" bytea NOT NULL,
"gun" varchar(255) NOT NULL,
"role" varchar(255) NOT NULL,
"last_used" timestamp NULL DEFAULT NULL,
CONSTRAINT "key_id" UNIQUE ("key_id"),
CONSTRAINT "key_id_2" UNIQUE ("key_id","algorithm")
);
CREATE TABLE "schema_migrations" (
"version" int PRIMARY KEY
);
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO signer;

View File

@ -0,0 +1,4 @@
CREATE DATABASE notarysigner;
CREATE USER signer;
alter user signer with encrypted password 'password';
GRANT ALL PRIVILEGES ON DATABASE notarysigner TO signer;

View File

@ -3,20 +3,13 @@ CREATE DATABASE registry ENCODING 'UTF8';
\c registry;
create table access (
access_id SERIAL PRIMARY KEY NOT NULL,
access_id int PRIMARY KEY NOT NULL,
access_code char(1),
comment varchar (30)
);
insert into access (access_code, comment) values
('M', 'Management access for project'),
('R', 'Read access for project'),
('W', 'Write access for project'),
('D', 'Delete access for project'),
('S', 'Search access for project');
create table role (
role_id SERIAL PRIMARY KEY NOT NULL,
role_id int PRIMARY KEY NOT NULL,
role_mask int DEFAULT 0 NOT NULL,
role_code varchar(20),
name varchar (20)
@ -27,34 +20,25 @@ role mask is used for future enhancement when a project member can have multi-ro
currently set to 0
*/
insert into role (role_code, name) values
('MDRWS', 'projectAdmin'),
('RWS', 'developer'),
('RS', 'guest');
create table harbor_user (
user_id SERIAL PRIMARY KEY NOT NULL,
user_id int PRIMARY KEY NOT NULL,
username varchar(255),
email varchar(255),
password varchar(40) NOT NULL,
realname varchar (255) NOT NULL,
comment varchar (30),
deleted boolean DEFAULT false NOT NULL,
deleted smallint DEFAULT 0 NOT NULL,
reset_uuid varchar(40) DEFAULT NULL,
salt varchar(40) DEFAULT NULL,
sysadmin_flag boolean DEFAULT false NOT NULL,
sysadmin_flag smallint DEFAULT 0 NOT NULL,
creation_time timestamp(0),
update_time timestamp(0),
UNIQUE (username),
UNIQUE (email)
);
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
create table project (
project_id SERIAL PRIMARY KEY NOT NULL,
project_id int PRIMARY KEY NOT NULL,
owner_id int NOT NULL,
/*
The max length of name controlled by API is 30,
@ -63,16 +47,15 @@ create table project (
name varchar (255) NOT NULL,
creation_time timestamp,
update_time timestamp,
deleted boolean DEFAULT false NOT NULL,
deleted smallint DEFAULT 0 NOT NULL,
/*
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
*/
UNIQUE (name)
);
insert into project (owner_id, name, creation_time, update_time) values
(1, 'library', NOW(), NOW());
create table project_member (
id SERIAL NOT NULL,
id int NOT NULL,
project_id int NOT NULL,
entity_id int NOT NULL,
/*
@ -98,29 +81,25 @@ $$;
CREATE TRIGGER project_member_update_time_at_modtime BEFORE UPDATE ON project_member FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
insert into project_member (project_id, entity_id, role, entity_type) values
(1, 1, 1, 'u');
create table project_metadata (
id SERIAL NOT NULL,
id int NOT NULL,
project_id int NOT NULL,
name varchar(255) NOT NULL,
value varchar(255),
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
deleted boolean DEFAULT false NOT NULL,
deleted smallint DEFAULT 0 NOT NULL,
PRIMARY KEY (id),
CONSTRAINT unique_project_id_and_name UNIQUE (project_id,name),
CONSTRAINT unique_project_id_and_name UNIQUE (project_id,name)
/*
FOREIGN KEY (project_id) REFERENCES project(project_id)
*/
);
CREATE TRIGGER project_metadata_update_time_at_modtime BEFORE UPDATE ON project_metadata FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
insert into project_metadata (project_id, name, value, creation_time, update_time, deleted) values
(1, 'public', 'true', NOW(), NOW(), false);
create table user_group (
id SERIAL NOT NULL,
id int NOT NULL,
group_name varchar(255) NOT NULL,
group_type smallint default 0,
ldap_group_dn varchar(512) NOT NULL,
@ -132,7 +111,7 @@ create table user_group (
CREATE TRIGGER user_group_update_time_at_modtime BEFORE UPDATE ON user_group FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table access_log (
log_id SERIAL NOT NULL,
log_id int NOT NULL,
username varchar (255) NOT NULL,
project_id int NOT NULL,
repo_name varchar (256),
@ -146,7 +125,7 @@ create table access_log (
CREATE INDEX pid_optime ON access_log (project_id, op_time);
create table repository (
repository_id SERIAL NOT NULL,
repository_id int NOT NULL,
name varchar(255) NOT NULL,
project_id int NOT NULL,
description text,
@ -161,16 +140,16 @@ create table repository (
CREATE TRIGGER repository_update_time_at_modtime BEFORE UPDATE ON repository FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_policy (
id SERIAL NOT NULL,
id int NOT NULL,
name varchar(256),
project_id int NOT NULL,
target_id int NOT NULL,
enabled boolean NOT NULL DEFAULT true,
enabled SMALLINT NOT NULL DEFAULT 1,
description text,
deleted boolean DEFAULT false NOT NULL,
deleted SMALLINT DEFAULT 0 NOT NULL,
cron_str varchar(256),
filters varchar(1024),
replicate_deletion boolean DEFAULT false NOT NULL,
replicate_deletion SMALLINT DEFAULT 0 NOT NULL,
start_time timestamp NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
@ -180,7 +159,7 @@ create table replication_policy (
CREATE TRIGGER replication_policy_update_time_at_modtime BEFORE UPDATE ON replication_policy FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_target (
id SERIAL NOT NULL,
id int NOT NULL,
name varchar(64),
url varchar(64),
username varchar(255),
@ -191,7 +170,7 @@ create table replication_target (
1 means it's a regular registry
*/
target_type SMALLINT NOT NULL DEFAULT 0,
insecure boolean NOT NULL DEFAULT false,
insecure SMALLINT NOT NULL DEFAULT 0,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
@ -200,7 +179,7 @@ create table replication_target (
CREATE TRIGGER replication_target_update_time_at_modtime BEFORE UPDATE ON replication_target FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_job (
id SERIAL NOT NULL,
id int NOT NULL,
status varchar(64) NOT NULL,
policy_id int NOT NULL,
repository varchar(256) NOT NULL,
@ -222,11 +201,11 @@ CREATE INDEX poid_status ON replication_job (policy_id, status);
CREATE TRIGGER replication_job_update_time_at_modtime BEFORE UPDATE ON replication_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_immediate_trigger (
id SERIAL NOT NULL,
id int NOT NULL,
policy_id int NOT NULL,
namespace varchar(256) NOT NULL,
on_push boolean NOT NULL DEFAULT false,
on_deletion boolean NOT NULL DEFAULT false,
on_push SMALLINT NOT NULL DEFAULT 0,
on_deletion SMALLINT NOT NULL DEFAULT 0,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
@ -235,7 +214,7 @@ create table replication_immediate_trigger (
CREATE TRIGGER replication_immediate_trigger_update_time_at_modtime BEFORE UPDATE ON replication_immediate_trigger FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_job (
id SERIAL NOT NULL,
id int NOT NULL,
status varchar(64) NOT NULL,
repository varchar(256) NOT NULL,
tag varchar(128) NOT NULL,
@ -257,7 +236,7 @@ CREATE INDEX idx_repository_tag ON img_scan_job (repository,tag);
CREATE TRIGGER img_scan_job_update_time_at_modtime BEFORE UPDATE ON img_scan_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_overview (
id SERIAL NOT NULL,
id int NOT NULL,
image_digest varchar(128) NOT NULL,
scan_job_id int NOT NULL,
/* 0 indicates none, the higher the number, the more severe the status */
@ -275,7 +254,7 @@ create table img_scan_overview (
CREATE TRIGGER img_scan_overview_update_time_at_modtime BEFORE UPDATE ON img_scan_overview FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table clair_vuln_timestamp (
id SERIAL NOT NULL,
id int NOT NULL,
namespace varchar(128) NOT NULL,
last_update timestamp NOT NULL,
PRIMARY KEY(id),
@ -283,7 +262,7 @@ UNIQUE(namespace)
);
create table properties (
id SERIAL NOT NULL,
id int NOT NULL,
k varchar(64) NOT NULL,
v varchar(128) NOT NULL,
PRIMARY KEY(id),
@ -291,7 +270,7 @@ create table properties (
);
create table harbor_label (
id SERIAL NOT NULL,
id int NOT NULL,
name varchar(128) NOT NULL,
description text,
color varchar(16),
@ -309,13 +288,13 @@ create table harbor_label (
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY(id),
CONSTRAINT unique_label UNIQUE (name,scope, project_id)
CONSTRAINT unique_name_and_scope UNIQUE (name,scope,project_id)
);
CREATE TRIGGER harbor_label_update_time_at_modtime BEFORE UPDATE ON harbor_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table harbor_resource_label (
id SERIAL NOT NULL,
id int NOT NULL,
label_id int NOT NULL,
/*
the resource_id is the ID of project when the resource_type is p
@ -342,7 +321,4 @@ CREATE TRIGGER harbor_resource_label_update_time_at_modtime BEFORE UPDATE ON har
CREATE TABLE IF NOT EXISTS alembic_version (
version_num varchar(32) NOT NULL
);
insert into alembic_version values ('1.5.0');
);

View File

@ -0,0 +1,39 @@
#!/bin/bash
# Copyright 2017 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
function alembic_up {
local db_type="$1"
local target_version="$2"
export PYTHONPATH=$PYTHONPATH:/harbor-migration/db/alembic
if [ $db_type = "mysql" ]; then
source /harbor-migration/db/alembic/alembic.tpl > /harbor-migration/db/alembic/alembic.ini
echo "Performing upgrade $target_version..."
alembic -c /harbor-migration/db/alembic/alembic.ini current
alembic -c /harbor-migration/db/alembic/alembic.ini upgrade $target_version
alembic -c /harbor-migration/db/alembic/alembic.ini current
elif [ $db_type = "pgsql" ]; then
echo "TODO: add support for pgsql."
else
echo "Unsupported DB type."
exit 1
fi
echo "Upgrade performed."
}
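For context, a minimal sketch of how this helper is meant to be consumed (the standalone invocation below is an illustration; in this change alembic_up is actually called from the registry upgrade path):

source /harbor-migration/db/util/alembic.sh
# upgrade the mysql registry schema to the 1.5.0 revision
alembic_up mysql '1.5.0'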

View File

@ -0,0 +1,105 @@
#!/bin/bash
# Copyright 2017 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
DBCNF="-hlocalhost -u${DB_USR}"
function launch_mysql {
set +e
local usr="$1"
local pwd="$2"
if [ ! -z "$pwd" ]; then
export MYSQL_PWD="${DB_PWD}"
fi
echo 'Trying to start mysql server...'
chown -R 10000:10000 /var/lib/mysql
mysqld &
echo 'Waiting for MySQL to start...'
for i in {60..0}; do
if [ -z "$pwd" ]; then
mysqladmin -u$usr processlist >/dev/null 2>&1
else
mysqladmin -u$usr -p$pwd processlist >/dev/null 2>&1
fi
if [ $? -eq 0 ]; then
break
fi
sleep 1
done
set -e
if [ "$i" -eq 0 ]; then
echo "timeout. Can't run mysql server."
return 1
fi
return 0
}
function test_mysql {
set +e
launch_mysql $DB_USR $DB_PWD
if [ $? -eq 0 ]; then
echo "DB test failed."
exit 0
else
echo "DB test success."
exit 1
fi
set -e
}
function stop_mysql {
if [ -z $2 ]; then
mysqladmin -u$1 shutdown
else
mysqladmin -u$1 -p$DB_PWD shutdown
fi
sleep 1
}
function get_version_mysql {
local cur_version=""
set +e
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='alembic_version';") -eq 0 ]]; then
echo "table alembic_version does not exist. Trying to initial alembic_version."
mysql $DBCNF < ./alembic.sql
#compatible with version 0.1.0 and 0.1.1
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='properties'") -eq 0 ]]; then
echo "table properties does not exist. The version of registry is 0.1.0"
cur_version='0.1.0'
else
echo "The version of registry is 0.1.1"
mysql $DBCNF -e "insert into registry.alembic_version values ('0.1.1')"
cur_version='0.1.1'
fi
else
cur_version=$(mysql $DBCNF -N -s -e "select * from registry.alembic_version;")
fi
set -e
echo $cur_version
}
# It's only for the registry DB; the code is leveraged from 1.5.0.
function backup_mysql {
mysqldump $DBCNF --add-drop-database --databases registry > /harbor-migration/backup/registry.sql
}
# It's only for the registry DB; the code is leveraged from 1.5.0.
function restore_mysql {
mysql $DBCNF < /harbor-migration/backup/registry.sql
}
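A minimal sketch of how these mysql helpers chain together, assuming DB_USR and DB_PWD are exported as this script already expects (the caller itself is not part of this diff):

source /harbor-migration/db/util/mysql.sh
launch_mysql $DB_USR $DB_PWD     # start mysqld and wait until it accepts connections
get_version_mysql                # print the registry schema version (e.g. 0.1.1 or 1.5.0)
backup_mysql                     # dump registry to /harbor-migration/backup/registry.sql
stop_mysql $DB_USR $DB_PWD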

View File

@ -0,0 +1,82 @@
#!/bin/bash
# Copyright 2017 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
source $PWD/db/util/pgsql.sh
source $PWD/db/util/alembic.sh
set -e
DBCNF="-hlocalhost -u${DB_USR}"
function mysql_2_pgsql_1_5_0 {
alembic_up mysql '1.5.0'
## dump 1.5.0-mysql
mysqldump --compatible=postgresql --no-create-info --complete-insert --default-character-set=utf8 --databases registry > /harbor-migration/db/schema/registry.mysql
## migrate 1.5.0-mysql to 1.5.0-pgsql.
python /harbor-migration/db/util/mysql_pgsql_data_converter.py /harbor-migration/db/schema/registry.mysql /harbor-migration/db/schema/registry_insert_data.pgsql
## import 1.5.0-pgsql into pgsql.
psql -U $1 -f /harbor-migration/db/schema/registry_create_tables.pgsql
psql -U $1 -f /harbor-migration/db/schema/registry_insert_data.pgsql
}
# This function is only for <= 1.5.0 to migrate notary db from mysql to pgsql.
function up_notary {
set +e
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='notaryserver' and table_name='tuf_files'") -eq 0 ]]; then
echo "no content trust data needs to be updated."
return 0
else
## it's not a clean notary db, so the create-tables step cannot be executed.
## fail here and ask the user to clean the DB tables, then rerun the notary db migration.
if [[ $(psql -U $1 -d notaryserver -t -c "select count(*) from pg_tables where schemaname='public';") -ne 0 ]]; then
cat >&2 <<-EOF
*******************************************************************************
WARNING: Notary migration is only allowed for users who have not yet migrated
notary or launched Harbor.
If you want to migrate notary data, please first delete all the notaryserver
and notarysigner DB tables in pgsql manually.
*******************************************************************************
EOF
exit 0
fi
set -e
mysqldump --skip-triggers --compact --no-create-info --skip-quote-names --hex-blob --compatible=postgresql --default-character-set=utf8 --databases notaryserver > /harbor-migration/db/schema/notaryserver.mysql.tmp
sed "s/0x\([0-9A-F]*\)/decode('\1','hex')/g" /harbor-migration/db/schema/notaryserver.mysql.tmp > /harbor-migration/db/schema/notaryserver_insert_data.mysql
mysqldump --skip-triggers --compact --no-create-info --skip-quote-names --hex-blob --compatible=postgresql --default-character-set=utf8 --databases notarysigner > /harbor-migration/db/schema/notarysigner.mysql.tmp
sed "s/0x\([0-9A-F]*\)/decode('\1','hex')/g" /harbor-migration/db/schema/notarysigner.mysql.tmp > /harbor-migration/db/schema/notarysigner_insert_data.mysql
python /harbor-migration/db/util/mysql_pgsql_data_converter.py /harbor-migration/db/schema/notaryserver_insert_data.mysql /harbor-migration/db/schema/notaryserver_insert_data.pgsql
python /harbor-migration/db/util/mysql_pgsql_data_converter.py /harbor-migration/db/schema/notarysigner_insert_data.mysql /harbor-migration/db/schema/notarysigner_insert_data.pgsql
# launch_pgsql $PGSQL_USR
psql -U $1 -f /harbor-migration/db/schema/notaryserver_create_tables.pgsql
psql -U $1 -f /harbor-migration/db/schema/notaryserver_insert_data.pgsql
psql -U $1 -f /harbor-migration/db/schema/notarysigner_create_tables.pgsql
psql -U $1 -f /harbor-migration/db/schema/notarysigner_insert_data.pgsql
stop_mysql root
stop_pgsql $1
fi
}
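To illustrate the hex-blob rewrite done by the sed expressions above, a short sketch with a made-up sample row (mysqldump --hex-blob emits binary columns as 0x... literals, which pgsql expects as decode(...,'hex')):

echo "INSERT INTO tuf_files VALUES (1, 0x1F8B0800);" \
    | sed "s/0x\([0-9A-F]*\)/decode('\1','hex')/g"
# prints: INSERT INTO tuf_files VALUES (1, decode('1F8B0800','hex'));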

View File

@ -0,0 +1,160 @@
#!/usr/bin/env python
import re
import sys
import os
import time
import subprocess
def convert_registry_db(mysql_dump_file, pgsql_dump_file):
mysql_dump = open(mysql_dump_file)
pgsql_dump = open(pgsql_dump_file, "w")
insert_lines = []
for i, line in enumerate(mysql_dump):
line = line.decode("utf8").strip()
# catch insert
if line.startswith("INSERT INTO"):
# "user" is a reserved word in pgsql and can't be used unquoted as a table name, so change it to harbor_user.
if line.startswith('INSERT INTO "user"'):
insert_lines.append(line.replace('INSERT INTO "user"', 'INSERT INTO "harbor_user"'))
# the pgsql schema uses lower-case column names, so change "GUID" to lower-case.
elif line.find('INSERT INTO "access_log" ("log_id", "username", "project_id", "repo_name", "repo_tag", "GUID", "operation", "op_time")') != -1:
line = line.replace('INSERT INTO "access_log" ("log_id", "username", "project_id", "repo_name", "repo_tag", "GUID", "operation", "op_time")',
'INSERT INTO "access_log" ("log_id", "username", "project_id", "repo_name", "repo_tag", "guid", "operation", "op_time")')
insert_lines.append(line)
continue
# pgsql doesn't accept the zero timestamp '0000-00-00 00:00:00', so change it to the minimum value.
elif line.find("0000-00-00 00:00:00") != -1:
line = line.replace("0000-00-00 00:00:00", "0001-01-01 00:00:00")
insert_lines.append(line)
continue
# mysqldump generates dumps in which strings are enclosed in quotes and quotes inside the string are escaped with a backslash
# like, {\"kind\":\"Manual\",\"schedule_param\":null}.
# this is by design of mysql, see issue https://bugs.mysql.com/bug.php?id=65941
# the data could still be inserted into pgsql, but Harbor API calls on it would then fail.
elif line.find('\\"') != -1:
line = line.replace('\\"', '"')
insert_lines.append(line)
continue
else:
insert_lines.append(line)
write_database(pgsql_dump, "registry")
write_insert(pgsql_dump, insert_lines)
write_alter_table_bool(pgsql_dump, "harbor_user", "deleted")
write_alter_table_bool(pgsql_dump, "harbor_user", "sysadmin_flag")
write_alter_table_bool(pgsql_dump, "project", "deleted")
write_alter_table_bool(pgsql_dump, "project_metadata", "deleted")
write_alter_table_bool(pgsql_dump, "replication_policy", "enabled", "TRUE")
write_alter_table_bool(pgsql_dump, "replication_policy", "replicate_deletion")
write_alter_table_bool(pgsql_dump, "replication_policy", "deleted")
write_alter_table_bool(pgsql_dump, "replication_target", "insecure")
write_alter_table_bool(pgsql_dump, "replication_immediate_trigger", "on_push")
write_alter_table_bool(pgsql_dump, "replication_immediate_trigger", "on_deletion")
write_foreign_key(pgsql_dump)
write_sequence(pgsql_dump, "harbor_user", "user_id")
write_sequence(pgsql_dump, "project", "project_id")
write_sequence(pgsql_dump, "project_member", "id")
write_sequence(pgsql_dump, "project_metadata", "id")
write_sequence(pgsql_dump, "user_group", "id")
write_sequence(pgsql_dump, "access_log", "log_id")
write_sequence(pgsql_dump, "repository", "repository_id")
write_sequence(pgsql_dump, "replication_policy", "id")
write_sequence(pgsql_dump, "replication_target", "id")
write_sequence(pgsql_dump, "replication_immediate_trigger", "id")
write_sequence(pgsql_dump, "img_scan_job", "id")
write_sequence(pgsql_dump, "img_scan_overview", "id")
write_sequence(pgsql_dump, "clair_vuln_timestamp", "id")
write_sequence(pgsql_dump, "properties", "id")
write_sequence(pgsql_dump, "harbor_label", "id")
write_sequence(pgsql_dump, "harbor_resource_label", "id")
write_sequence(pgsql_dump, "replication_job", "id")
write_sequence(pgsql_dump, "role", "role_id")
def convert_notary_server_db(mysql_dump_file, pgsql_dump_file):
mysql_dump = open(mysql_dump_file)
pgsql_dump = open(pgsql_dump_file, "w")
insert_lines = []
for i, line in enumerate(mysql_dump):
line = line.decode("utf8").strip()
# catch insert
if line.startswith("INSERT INTO"):
if line.find("0000-00-00 00:00:00") != -1:
line = line.replace("0000-00-00 00:00:00", "0001-01-01 00:00:00")
insert_lines.append(line)
continue
else:
insert_lines.append(line)
write_database(pgsql_dump, "notaryserver")
write_insert(pgsql_dump, insert_lines)
write_sequence(pgsql_dump, "tuf_files", "id")
def convert_notary_signer_db(mysql_dump_file, pgsql_dump_file):
mysql_dump = open(mysql_dump_file)
pgsql_dump = open(pgsql_dump_file, "w")
insert_lines = []
for i, line in enumerate(mysql_dump):
line = line.decode("utf8").strip()
# catch insert
if line.startswith("INSERT INTO"):
if line.find("0000-00-00 00:00:00") != -1:
line = line.replace("0000-00-00 00:00:00", "0001-01-01 00:00:00")
insert_lines.append(line)
continue
else:
insert_lines.append(line)
write_database(pgsql_dump, "notarysigner")
write_insert(pgsql_dump, insert_lines)
write_sequence(pgsql_dump, "private_keys", "id")
def write_database(pgsql_dump, db_name):
pgsql_dump.write("\\c %s;\n" % db_name)
def write_table(pgsql_dump, table_lines):
for item in table_lines:
pgsql_dump.write("%s\n" % item)
if item.startswith(');'):
pgsql_dump.write('\n')
pgsql_dump.write('\n')
def write_insert(pgsql_dump, insert_lines):
for item in insert_lines:
pgsql_dump.write("%s\n" % item)
def write_foreign_key(pgsql_dump):
pgsql_dump.write('\n')
pgsql_dump.write("%s\n" % "ALTER TABLE \"project\" ADD CONSTRAINT \"project_ibfk_1\" FOREIGN KEY (\"owner_id\") REFERENCES \"harbor_user\" (\"user_id\");")
pgsql_dump.write("%s\n" % "ALTER TABLE \"project_metadata\" ADD CONSTRAINT \"project_metadata_ibfk_1\" FOREIGN KEY (\"project_id\") REFERENCES \"project\" (\"project_id\");")
def write_alter_table_bool(pgsql_dump, table_name, table_columnn, default_value="FALSE"):
pgsql_dump.write('\n')
pgsql_dump.write("ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT;\n" % (table_name, table_columnn))
pgsql_dump.write("ALTER TABLE %s ALTER %s TYPE bool USING CASE WHEN %s=0 THEN FALSE ELSE TRUE END;\n" % (table_name, table_columnn, table_columnn))
pgsql_dump.write("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s;\n" % (table_name, table_columnn, default_value))
def write_sequence(pgsql_dump, table_name, table_columnn):
pgsql_dump.write('\n')
pgsql_dump.write("CREATE SEQUENCE %s_%s_seq;\n" % (table_name, table_columnn))
pgsql_dump.write("SELECT setval('%s_%s_seq', max(%s)) FROM %s;\n" % (table_name, table_columnn, table_columnn, table_name))
pgsql_dump.write("ALTER TABLE \"%s\" ALTER COLUMN \"%s\" SET DEFAULT nextval('%s_%s_seq');\n" % (table_name, table_columnn, table_name, table_columnn))
if __name__ == "__main__":
if sys.argv[1].find("registry") != -1:
convert_registry_db(sys.argv[1], sys.argv[2])
elif sys.argv[1].find("notaryserver") != -1:
convert_notary_server_db(sys.argv[1], sys.argv[2])
elif sys.argv[1].find("notarysigner") != -1:
convert_notary_signer_db(sys.argv[1], sys.argv[2])
else:
print ("Unsupport mysql dump file, %s" % sys.argv[1])
sys.exit(1)
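A brief usage sketch of this converter; the paths follow the ones used elsewhere in this change, and the sample output below is reconstructed from the write_sequence format strings above:

python /harbor-migration/db/util/mysql_pgsql_data_converter.py /harbor-migration/db/schema/registry.mysql /harbor-migration/db/schema/registry_insert_data.pgsql
# for the project table, write_sequence appends statements such as:
#   CREATE SEQUENCE project_project_id_seq;
#   SELECT setval('project_project_id_seq', max(project_id)) FROM project;
#   ALTER TABLE "project" ALTER COLUMN "project_id" SET DEFAULT nextval('project_project_id_seq');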

View File

@ -0,0 +1,140 @@
#!/bin/bash
# Copyright 2017 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
POSTGRES_PASSWORD=${DB_PWD}
function file_env {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}
if [ "${1:0:1}" = '-' ]; then
set -- postgres "$@"
fi
function launch_pgsql {
if [ "$1" = 'postgres' ]; then
chown -R postgres:postgres $PGDATA
# look specifically for PG_VERSION, as it is expected in the DB dir
if [ ! -s "$PGDATA/PG_VERSION" ]; then
file_env 'POSTGRES_INITDB_ARGS'
if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
fi
su - $1 -c "initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS"
# check password first so we can output the warning before postgres
# messes it up
file_env 'POSTGRES_PASSWORD'
if [ "$POSTGRES_PASSWORD" ]; then
pass="PASSWORD '$POSTGRES_PASSWORD'"
authMethod=md5
else
# The - option suppresses leading tabs but *not* spaces. :)
echo "Use \"-e POSTGRES_PASSWORD=password\" to set the password in \"docker run\"."
exit 1
fi
{
echo
echo "host all all all $authMethod"
} >> "$PGDATA/pg_hba.conf"
# internal start of server in order to allow set-up using psql-client
# does not listen on external TCP/IP and waits until start finishes
su - $1 -c "pg_ctl -D \"$PGDATA\" -o \"-c listen_addresses='localhost'\" -w start"
file_env 'POSTGRES_USER' 'postgres'
file_env 'POSTGRES_DB' "$POSTGRES_USER"
psql=( psql -v ON_ERROR_STOP=1 )
if [ "$POSTGRES_DB" != 'postgres' ]; then
"${psql[@]}" --username postgres <<-EOSQL
CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
echo
fi
if [ "$POSTGRES_USER" = 'postgres' ]; then
op='ALTER'
else
op='CREATE'
fi
"${psql[@]}" --username postgres <<-EOSQL
$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
EOSQL
echo
psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
#PGUSER="${PGUSER:-postgres}" \
#su - $1 -c "pg_ctl -D \"$PGDATA\" -m fast -w stop"
echo
echo 'PostgreSQL init process complete; ready for start up.'
echo
else
su - $PGSQL_USR -c "pg_ctl -D \"$PGDATA\" -o \"-c listen_addresses='localhost'\" -w start"
fi
fi
}
function stop_pgsql {
su - $1 -c "pg_ctl -D \"/var/lib/postgresql/data\" -w stop"
}
function get_version_pgsql {
version=$(psql -U $1 -d registry -t -c "select * from alembic_version;")
echo $version
}
function test_pgsql {
echo "TODO: needs to implement test pgsql connection..."
}
function backup_pgsql {
echo "TODO: needs to implement backup registry..."
}
function restore_pgsql {
echo "TODO: needs to implement restore registry..."
}
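Finally, a minimal sketch of how these pgsql helpers might be driven by a caller (the caller is not part of this diff; DB_PWD is assumed to be set, since the script maps it to POSTGRES_PASSWORD):

source /harbor-migration/db/util/pgsql.sh
launch_pgsql postgres        # initdb on first run, then start the server on localhost
get_version_pgsql postgres   # print the registry alembic version
stop_pgsql postgres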