Update migrator to support upgrades only from >=v1.6.0

This commit removes the code that supported upgrading from versions earlier than v1.6.0.
It also removes the mysql/mariadb support packages from the Dockerfile.

It does not cover the further optimization of scripts such as run.sh that becomes possible after this update.
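
As context, a minimal hedged sketch of the single upgrade path the migrator keeps after this change (names taken from the run.sh diff further below; this is not the literal script): the mounted data must already be PostgreSQL, i.e. produced by v1.6.0 or later.

    # Sketch of the post-change guard inside up_harbor, per the run.sh diff below
    if [ "$ISPGSQL" != true ]; then
        echo "Please mount the database volume to /var/lib/postgresql/data, then run the upgrade again."
        return 1
    fi
    alembic_up pgsql "$target_version"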

Signed-off-by: Daniel Jiang <jiangd@vmware.com>
Daniel Jiang 2019-05-06 22:36:36 -07:00
parent d74624d306
commit 3a1ffb3c49
29 changed files with 16 additions and 2623 deletions


@@ -6,15 +6,11 @@ ENV PGDATA /var/lib/postgresql/data
RUN tdnf distro-sync -y \
&& tdnf remove -y toybox \
&& tdnf install -y sed shadow procps-ng gawk gzip sudo net-tools glibc-i18n >> /dev/null\
&& groupadd -r -g 10000 mysql && useradd --no-log-init -r -g 10000 -u 10000 mysql \
&& tdnf install -y mariadb-server mariadb mariadb-devel python2 python2-devel python-pip gcc PyYAML python-jinja2\
&& tdnf install -y python2 python2-devel python-pip gcc PyYAML python-jinja2 \
linux-api-headers glibc-devel binutils zlib-devel openssl-devel postgresql python-psycopg2 >> /dev/null \
&& pip install mysqlclient alembic \
&& pip install alembic \
&& mkdir /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& rm -fr /var/lib/mysql \
&& mkdir -p /var/lib/mysql /var/run/mysqld \
&& chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \
&& chmod 777 /var/run/mysqld /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& chmod 777 /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& mkdir -p /harbor-migration \
&& touch /etc/localtime.bak \
&& groupadd -r postgres --gid=999 \
@@ -29,8 +25,6 @@ RUN tdnf distro-sync -y \
&& locale-gen.sh en_US.UTF-8 \
&& tdnf clean all
COPY ./db/my.cnf /etc/
VOLUME /var/lib/postgresql/data
WORKDIR /harbor-migration


@@ -1,4 +0,0 @@
use `registry`;
CREATE TABLE IF NOT EXISTS `alembic_version` (
`version_num` varchar(32) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;


@@ -1,68 +0,0 @@
echo "
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = /harbor-migration/db/alembic/mysql/migration_harbor
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to migration_harbor/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat migration_harbor/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = mysql://$DB_USR:$DB_PWD@localhost:3306/registry?unix_socket=/var/run/mysqld/mysqld.sock
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S"


@@ -1,276 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.dialects import mysql
import datetime
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
user_id = sa.Column(sa.Integer, primary_key=True)
username = sa.Column(sa.String(255), unique=True)
email = sa.Column(sa.String(255), unique=True)
password = sa.Column(sa.String(40), nullable=False)
realname = sa.Column(sa.String(255), nullable=False)
comment = sa.Column(sa.String(30))
deleted = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
reset_uuid = sa.Column(sa.String(40))
salt = sa.Column(sa.String(40))
sysadmin_flag = sa.Column(sa.Integer)
creation_time = sa.Column(mysql.TIMESTAMP)
update_time = sa.Column(mysql.TIMESTAMP)
class UserGroup(Base):
__tablename__ = 'user_group'
id = sa.Column(sa.Integer, primary_key=True)
group_name = sa.Column(sa.String(128), nullable = False)
group_type = sa.Column(sa.Integer, server_default=sa.text("'0'"))
ldap_group_dn = sa.Column(sa.String(512), nullable=False)
creation_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Properties(Base):
__tablename__ = 'properties'
id = sa.Column(sa.Integer, primary_key=True)
k = sa.Column(sa.String(64), unique=True)
v = sa.Column(sa.String(128), nullable = False)
class ProjectMember(Base):
__tablename__ = 'project_member'
id = sa.Column(sa.Integer, primary_key=True)
project_id = sa.Column(sa.Integer(), nullable=False)
entity_id = sa.Column(sa.Integer(), nullable=False)
entity_type = sa.Column(sa.String(1), nullable=False)
role = sa.Column(sa.Integer(), nullable = False)
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
__table_args__ = (sa.UniqueConstraint('project_id', 'entity_id', 'entity_type', name='unique_name_and_scope'),)
class UserProjectRole(Base):
__tablename__ = 'user_project_role'
upr_id = sa.Column(sa.Integer(), primary_key = True)
user_id = sa.Column(sa.Integer(), sa.ForeignKey('user.user_id'))
pr_id = sa.Column(sa.Integer(), sa.ForeignKey('project_role.pr_id'))
project_role = relationship("ProjectRole")
class ProjectRole(Base):
__tablename__ = 'project_role'
pr_id = sa.Column(sa.Integer(), primary_key = True)
project_id = sa.Column(sa.Integer(), nullable = False)
role_id = sa.Column(sa.Integer(), nullable = False)
sa.ForeignKeyConstraint(['role_id'], [u'role.role_id'])
sa.ForeignKeyConstraint(['project_id'], [u'project.project_id'])
class Access(Base):
__tablename__ = 'access'
access_id = sa.Column(sa.Integer(), primary_key = True)
access_code = sa.Column(sa.String(1))
comment = sa.Column(sa.String(30))
class Role(Base):
__tablename__ = 'role'
role_id = sa.Column(sa.Integer, primary_key=True)
role_mask = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
role_code = sa.Column(sa.String(20))
name = sa.Column(sa.String(20))
class Project(Base):
__tablename__ = 'project'
project_id = sa.Column(sa.Integer, primary_key=True)
owner_id = sa.Column(sa.ForeignKey(u'user.user_id'), nullable=False, index=True)
name = sa.Column(sa.String(255), nullable=False, unique=True)
creation_time = sa.Column(mysql.TIMESTAMP)
update_time = sa.Column(mysql.TIMESTAMP)
deleted = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
owner = relationship(u'User')
class ProjectMetadata(Base):
__tablename__ = 'project_metadata'
id = sa.Column(sa.Integer, primary_key=True)
project_id = sa.Column(sa.ForeignKey(u'project.project_id'), nullable=False)
name = sa.Column(sa.String(255), nullable=False)
value = sa.Column(sa.String(255))
creation_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
deleted = sa.Column(mysql.TINYINT(1), nullable=False, server_default='0')
__table_args__ = (sa.UniqueConstraint('project_id', 'name', name='unique_project_id_and_name'),)
class ReplicationPolicy(Base):
__tablename__ = "replication_policy"
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(256))
project_id = sa.Column(sa.Integer, nullable=False)
target_id = sa.Column(sa.Integer, nullable=False)
enabled = sa.Column(mysql.TINYINT(1), nullable=False, server_default=sa.text("'1'"))
description = sa.Column(sa.Text)
cron_str = sa.Column(sa.String(256))
filters = sa.Column(sa.String(1024))
replicate_deletion = sa.Column(mysql.TINYINT(1), nullable=False, server_default='0')
start_time = sa.Column(mysql.TIMESTAMP)
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ReplicationTarget(Base):
__tablename__ = "replication_target"
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(64))
url = sa.Column(sa.String(64))
username = sa.Column(sa.String(255))
password = sa.Column(sa.String(40))
target_type = sa.Column(mysql.TINYINT(1), nullable=False, server_default=sa.text("'0'"))
insecure = sa.Column(mysql.TINYINT(1), nullable=False, server_default='0')
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ReplicationJob(Base):
__tablename__ = "replication_job"
id = sa.Column(sa.Integer, primary_key=True)
status = sa.Column(sa.String(64), nullable=False)
policy_id = sa.Column(sa.Integer, nullable=False)
repository = sa.Column(sa.String(256), nullable=False)
operation = sa.Column(sa.String(64), nullable=False)
tags = sa.Column(sa.String(16384))
job_uuid = sa.Column(sa.String(64))
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
__table_args__ = (sa.Index('policy', 'policy_id'),)
class ReplicationImmediateTrigger(Base):
__tablename__ = 'replication_immediate_trigger'
id = sa.Column(sa.Integer, primary_key=True)
policy_id = sa.Column(sa.Integer, nullable=False)
namespace = sa.Column(sa.String(256), nullable=False)
on_push = sa.Column(mysql.TINYINT(1), nullable=False, server_default='0')
on_deletion = sa.Column(mysql.TINYINT(1), nullable=False, server_default='0')
creation_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Repository(Base):
__tablename__ = "repository"
repository_id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(255), nullable=False, unique=True)
project_id = sa.Column(sa.Integer, nullable=False)
owner_id = sa.Column(sa.Integer, nullable=False)
description = sa.Column(sa.Text)
pull_count = sa.Column(sa.Integer,server_default=sa.text("'0'"), nullable=False)
star_count = sa.Column(sa.Integer,server_default=sa.text("'0'"), nullable=False)
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class AccessLog(Base):
__tablename__ = "access_log"
user_id = sa.Column(sa.Integer, nullable=False)
log_id = sa.Column(sa.Integer, primary_key=True)
username = sa.Column(sa.String(255), nullable=False)
project_id = sa.Column(sa.Integer, nullable=False)
repo_name = sa.Column(sa.String(256))
repo_tag = sa.Column(sa.String(128))
GUID = sa.Column(sa.String(64))
operation = sa.Column(sa.String(20))
op_time = sa.Column(mysql.TIMESTAMP)
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
__table_args__ = (sa.Index('project_id', "op_time"),)
class ImageScanJob(Base):
__tablename__ = "img_scan_job"
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
status = sa.Column(sa.String(64), nullable=False)
repository = sa.Column(sa.String(256), nullable=False)
tag = sa.Column(sa.String(128), nullable=False)
digest = sa.Column(sa.String(128))
job_uuid = sa.Column(sa.String(64))
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ImageScanOverview(Base):
__tablename__ = "img_scan_overview"
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
image_digest = sa.Column(sa.String(128), nullable=False)
scan_job_id = sa.Column(sa.Integer, nullable=False)
severity = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
components_overview = sa.Column(sa.String(2048))
details_key = sa.Column(sa.String(128))
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ClairVulnTimestamp(Base):
__tablename__ = "clair_vuln_timestamp"
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
namespace = sa.Column(sa.String(128), nullable=False, unique=True)
last_update = sa.Column(mysql.TIMESTAMP)
class HarborLabel(Base):
__tablename__ = "harbor_label"
id = sa.Column(sa.Integer, nullable=False, primary_key=True)
name = sa.Column(sa.String(128), nullable=False)
description = sa.Column(sa.Text)
color = sa.Column(sa.String(16))
level = sa.Column(sa.String(1), nullable=False)
scope = sa.Column(sa.String(1), nullable=False)
project_id = sa.Column(sa.Integer, nullable=False)
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
__table_args__ = (sa.UniqueConstraint('name', 'scope', 'project_id', name='unique_label'),)
class HarborResourceLabel(Base):
__tablename__ = 'harbor_resource_label'
id = sa.Column(sa.Integer, primary_key=True)
label_id = sa.Column(sa.Integer, nullable=False)
resource_id = sa.Column(sa.Integer)
resource_name = sa.Column(sa.String(256))
resource_type = sa.Column(sa.String(1), nullable=False)
creation_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
update_time = sa.Column(mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
__table_args__ = (sa.UniqueConstraint('label_id', 'resource_id', 'resource_name', 'resource_type', name='unique_label_resource'),)


@@ -1,85 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
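
This is the stock Alembic environment: the offline branch renders SQL without needing a DBAPI, while the online branch connects through sqlalchemy.url. A hedged example of driving both modes with the standard alembic CLI (config path and revision are assumptions based on the files in this commit):

    alembic -c alembic.ini upgrade 1.5.0 --sql > upgrade.sql   # offline: emit SQL only (run_migrations_offline)
    alembic -c alembic.ini upgrade 1.5.0                       # online: connect and apply (run_migrations_online)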


@@ -1,24 +0,0 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}


@@ -1,98 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""0.1.0 to 0.1.1
Revision ID: 0.1.1
Revises:
Create Date: 2016-04-18 18:32:14.101897
"""
# revision identifiers, used by Alembic.
revision = '0.1.1'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
Session = sessionmaker()
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
session = Session(bind=bind)
#create table property
Properties.__table__.create(bind)
session.add(Properties(k='schema_version', v='0.1.1'))
#add column to table user
op.add_column('user', sa.Column('creation_time', mysql.TIMESTAMP, nullable=True))
op.add_column('user', sa.Column('sysadmin_flag', sa.Integer(), nullable=True))
op.add_column('user', sa.Column('update_time', mysql.TIMESTAMP, nullable=True))
#init all sysadmin_flag = 0
session.query(User).update({User.sysadmin_flag: 0})
#create table project_member
ProjectMember.__table__.create(bind)
#fill data into project_member and user
join_result = session.query(UserProjectRole).join(UserProjectRole.project_role).all()
for result in join_result:
session.add(ProjectMember(project_id=result.project_role.project_id, \
user_id=result.user_id, role=result.project_role.role_id, \
creation_time=None, update_time=None))
#update sysadmin_flag
sys_admin_result = session.query(UserProjectRole).\
join(UserProjectRole.project_role).filter(ProjectRole.role_id ==1).all()
for result in sys_admin_result:
session.query(User).filter(User.user_id == result.user_id).update({User.sysadmin_flag: 1})
#add column to table role
op.add_column('role', sa.Column('role_mask', sa.Integer(), server_default=sa.text(u"'0'"), nullable=False))
#drop user_project_role table before drop project_role
#because foreign key constraint
op.drop_table('user_project_role')
op.drop_table('project_role')
#delete sysadmin from table role
role = session.query(Role).filter_by(role_id=1).first()
session.delete(role)
session.query(Role).update({Role.role_id: Role.role_id - 1})
#delete A from table access
acc = session.query(Access).filter_by(access_id=1).first()
session.delete(acc)
session.query(Access).update({Access.access_id: Access.access_id - 1})
#add column to table project
op.add_column('project', sa.Column('update_time', mysql.TIMESTAMP, nullable=True))
session.commit()
def downgrade():
"""
Downgrade has been disabled.
"""
pass


@@ -1,57 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""0.1.1 to 0.3.0
Revision ID: 0.1.1
Revises:
"""
# revision identifiers, used by Alembic.
revision = '0.3.0'
down_revision = '0.1.1'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
#alter column user.email, alter column access_log.repo_name, and add column access_log.repo_tag
op.alter_column('user', 'email', type_=sa.String(128), existing_type=sa.String(30))
op.alter_column('access_log', 'repo_name', type_=sa.String(256), existing_type=sa.String(40))
try:
op.add_column('access_log', sa.Column('repo_tag', sa.String(128)))
except Exception as e:
if str(e).find("Duplicate column") >=0:
print "ignore dup column error for repo_tag"
else:
raise e
#create tables: replication_policy, replication_target, replication_job
ReplicationPolicy.__table__.create(bind)
ReplicationTarget.__table__.create(bind)
ReplicationJob.__table__.create(bind)
def downgrade():
"""
Downgrade has been disabled.
"""
pass


@@ -1,54 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""0.3.0 to 0.4.0
Revision ID: 0.3.0
Revises:
"""
# revision identifiers, used by Alembic.
revision = '0.4.0'
down_revision = '0.3.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
#alter column user.username, alter column user.email, project.name and add column replication_policy.deleted
op.alter_column('user', 'username', type_=sa.String(32), existing_type=sa.String(15))
op.alter_column('user', 'email', type_=sa.String(255), existing_type=sa.String(128))
op.alter_column('project', 'name', type_=sa.String(41), existing_type=sa.String(30), nullable=False)
op.alter_column('replication_target', 'password', type_=sa.String(128), existing_type=sa.String(40))
op.add_column('replication_policy', sa.Column('deleted', mysql.TINYINT(1), nullable=False, server_default=sa.text("'0'")))
#create index pid_optime (project_id, op_time) on table access_log, poid_uptime (policy_id, update_time) on table replication_job
op.create_index('pid_optime', 'access_log', ['project_id', 'op_time'])
op.create_index('poid_uptime', 'replication_job', ['policy_id', 'update_time'])
#create tables: repository
Repository.__table__.create(bind)
def downgrade():
"""
Downgrade has been disabled.
"""
pass


@@ -1,79 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""0.4.0 to 1.2.0
Revision ID: 0.4.0
Revises:
"""
# revision identifiers, used by Alembic.
revision = '1.2.0'
down_revision = '0.4.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
Session = sessionmaker()
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
session = Session(bind=bind)
op.alter_column('user', 'realname', type_=sa.String(255), existing_type=sa.String(20))
#delete column access_log.user_id(access_log_ibfk_1), access_log.project_id(access_log_ibfk_2)
op.drop_constraint('access_log_ibfk_1', 'access_log', type_='foreignkey')
op.drop_constraint('access_log_ibfk_2', 'access_log', type_='foreignkey')
#add column username to access_log
op.add_column('access_log', sa.Column('username', mysql.VARCHAR(255), nullable=False))
#init username
session.query(AccessLog).update({AccessLog.username: ""})
#update access_log username
user_all = session.query(User).all()
for user in user_all:
session.query(AccessLog).filter(AccessLog.user_id == user.user_id).update({AccessLog.username: user.username}, synchronize_session='fetch')
#update user.username length to 255
op.alter_column('user', 'username', type_=sa.String(255), existing_type=sa.String(32))
#update replication_target.username length to 255
op.alter_column('replication_target', 'username', type_=sa.String(255), existing_type=sa.String(40))
op.drop_column("access_log", "user_id")
op.drop_column("repository", "owner_id")
#create tables: img_scan_job, img_scan_overview, clair_vuln_timestamp
ImageScanJob.__table__.create(bind)
ImageScanOverview.__table__.create(bind)
ClairVulnTimestamp.__table__.create(bind)
session.commit()
def downgrade():
"""
Downgrade has been disabled.
"""
pass


@@ -1,68 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""1.2.0 to 1.3.0
Revision ID: 1.2.0
Revises:
"""
# revision identifiers, used by Alembic.
revision = '1.3.0'
down_revision = '1.2.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
Session = sessionmaker()
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
session = Session(bind=bind)
# This is to solve the legacy issue, referred to in #3077, when upgrading from 1.2.0rc1 to 1.3.0
username_coloumn = session.execute("show columns from user where field='username'").fetchone()
if username_coloumn[1] != 'varchar(255)':
op.alter_column('user', 'username', type_=sa.String(255))
# create table project_metadata
ProjectMetadata.__table__.create(bind)
# migrate public data from project to project_metadata
# The original type in project is int; in project_metadata the value type is string
project_publicity = session.execute('SELECT project_id, public from project').fetchall()
project_metadatas = [ProjectMetadata(project_id=project_id, name='public', value='true' if public else 'false')
for project_id, public in project_publicity]
session.add_all(project_metadatas)
# drop public column from project
op.drop_column("project", "public")
# add column insecure to replication target
op.add_column('replication_target', sa.Column('insecure', mysql.TINYINT(1), nullable=False, server_default='0'))
session.commit()
def downgrade():
"""
Downgrade has been disabled.
"""


@@ -1,92 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""1.3.0 to 1.4.0
Revision ID: 1.3.0
Revises:
"""
# revision identifiers, used by Alembic.
revision = '1.4.0'
down_revision = '1.3.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
import os
from sqlalchemy.dialects import mysql
Session = sessionmaker()
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
session = Session(bind=bind)
# Alter column length of project name
op.alter_column('project', 'name', existing_type=sa.String(30), type_=sa.String(255), existing_nullable=False)
# Add columns in replication_policy table
op.add_column('replication_policy', sa.Column('filters', sa.String(1024)))
op.add_column('replication_policy', sa.Column('replicate_deletion', mysql.TINYINT(1), nullable=False, server_default='0'))
# create table replication_immediate_trigger
ReplicationImmediateTrigger.__table__.create(bind)
# Divide policies into disabled and enabled groups
unenabled_policies = session.query(ReplicationPolicy).filter(ReplicationPolicy.enabled == 0)
enabled_policies = session.query(ReplicationPolicy).filter(ReplicationPolicy.enabled == 1)
# As projects aren't stored in Harbor's database, migrate all replication
# policies with a manual trigger
if os.getenv('WITH_ADMIRAL', '') == 'true':
print ("deployed with admiral, migrating all replication policies with manual trigger")
enabled_policies.update({
ReplicationPolicy.enabled: 1,
ReplicationPolicy.cron_str: '{"kind":"Manual"}'
})
else:
# migrate enabled policies
enabled_policies.update({
ReplicationPolicy.cron_str: '{"kind":"Immediate"}'
})
immediate_triggers = [ReplicationImmediateTrigger(
policy_id=policy.id,
namespace=session.query(Project).get(policy.project_id).name,
on_push=1,
on_deletion=1) for policy in enabled_policies]
session.add_all(immediate_triggers)
# migrate unenabled policies
unenabled_policies.update({
ReplicationPolicy.enabled: 1,
ReplicationPolicy.cron_str: '{"kind":"Manual"}'
})
op.drop_constraint('PRIMARY', 'properties', type_='primary')
op.create_unique_constraint('uq_properties_k', 'properties', ['k'])
op.execute('ALTER TABLE properties ADD id INT PRIMARY KEY AUTO_INCREMENT;')
session.commit()
def downgrade():
"""
Downgrade has been disabled.
"""


@@ -1,90 +0,0 @@
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""1.4.0 to 1.5.0
Revision ID: 1.5.0
Revises:
"""
# revision identifiers, used by Alembic.
revision = '1.5.0'
down_revision = '1.4.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
Session = sessionmaker()
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
session = Session(bind=bind)
# create table harbor_label
HarborLabel.__table__.create(bind)
# create table harbor_resource_label
HarborResourceLabel.__table__.create(bind)
# create user_group
UserGroup.__table__.create(bind)
# project member
op.drop_constraint('project_member_ibfk_1', 'project_member', type_='foreignkey')
op.drop_constraint('project_member_ibfk_2', 'project_member', type_='foreignkey')
op.drop_constraint('project_member_ibfk_3', 'project_member', type_='foreignkey')
op.drop_constraint('PRIMARY', 'project_member', type_='primary')
op.drop_index('user_id', 'project_member')
op.drop_index('role', 'project_member')
op.execute('ALTER TABLE project_member ADD id INT PRIMARY KEY AUTO_INCREMENT;')
op.alter_column('project_member', 'user_id', existing_type=sa.Integer, existing_nullable=False, new_column_name='entity_id')
op.alter_column('project_member', 'creation_time', existing_type=mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
op.alter_column('project_member', 'update_time', existing_type=mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"), onupdate=sa.text("CURRENT_TIMESTAMP"))
op.add_column('project_member', sa.Column('entity_type', sa.String(1)))
session.query(ProjectMember).update({
ProjectMember.entity_type: 'u'
})
op.alter_column('project_member', 'entity_type', existing_type=sa.String(1), existing_nullable=True, nullable=False)
op.create_unique_constraint('unique_project_entity_type', 'project_member', ['project_id', 'entity_id', 'entity_type'])
# add job_uuid to replicationjob and img_scan_job
op.add_column('replication_job', sa.Column('job_uuid', sa.String(64)))
op.add_column('img_scan_job', sa.Column('job_uuid', sa.String(64)))
# add index to replication job
op.create_index('poid_status', 'replication_job', ['policy_id', 'status'])
# add index to img_scan_job
op.create_index('idx_status', 'img_scan_job', ['status'])
op.create_index('idx_digest', 'img_scan_job', ['digest'])
op.create_index('idx_uuid', 'img_scan_job', ['job_uuid'])
op.create_index('idx_repository_tag', 'img_scan_job', ['repository', 'tag'])
session.commit()
def downgrade():
"""
Downgrade has been disabled.
"""


@@ -1,191 +0,0 @@
# MariaDB database server configuration file.
#
# You can copy this file to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
# Here are entries for some specific programs
# The following values assume you have at least 32M ram
# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
skip-host-cache
#skip-name-resolve
#
# * Basic Settings
#
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc_messages_dir = /usr/share/mysql
lc_messages = en_US
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
#bind-address = 127.0.0.1
#
# * Fine Tuning
#
max_connections = 100
connect_timeout = 5
wait_timeout = 600
max_allowed_packet = 16M
thread_cache_size = 128
sort_buffer_size = 4M
bulk_insert_buffer_size = 16M
tmp_table_size = 32M
max_heap_table_size = 32M
#
# * MyISAM
#
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched. On error, make copy and try a repair.
myisam_recover_options = BACKUP
key_buffer_size = 128M
#open-files-limit = 2000
table_open_cache = 400
myisam_sort_buffer_size = 512M
concurrent_insert = 2
read_buffer_size = 2M
read_rnd_buffer_size = 1M
#
# * Query Cache Configuration
#
# Cache only tiny result sets, so we can fit more in the query cache.
query_cache_limit = 128K
query_cache_size = 64M
# for more write intensive setups, set to DEMAND or OFF
#query_cache_type = DEMAND
#
# * Logging and Replication
#
# Both locations get rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1
#
# Error logging goes to syslog due to /etc/mysql/conf.d/mysqld_safe_syslog.cnf.
#
# we do want to know about network errors and such
#log_warnings = 2
#
# Enable the slow query log to see queries with especially long duration
#slow_query_log[={0|1}]
slow_query_log_file = /var/log/mysql/mariadb-slow.log
long_query_time = 10
#log_slow_rate_limit = 1000
#log_slow_verbosity = query_plan
#log-queries-not-using-indexes
#log_slow_admin_statements
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
#server-id = 1
#report_host = master1
#auto_increment_increment = 2
#auto_increment_offset = 1
#log_bin = /var/log/mysql/mariadb-bin
#log_bin_index = /var/log/mysql/mariadb-bin.index
# not fab for performance, but safer
#sync_binlog = 1
expire_logs_days = 10
max_binlog_size = 100M
# slaves
#relay_log = /var/log/mysql/relay-bin
#relay_log_index = /var/log/mysql/relay-bin.index
#relay_log_info_file = /var/log/mysql/relay-bin.info
#log_slave_updates
#read_only
#
# If applications support it, this stricter sql_mode prevents some
# mistakes like inserting invalid dates etc.
#sql_mode = NO_ENGINE_SUBSTITUTION,TRADITIONAL
#
# * InnoDB
#
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
default_storage_engine = InnoDB
# you can't just change log file size, requires special procedure
#innodb_log_file_size = 50M
innodb_buffer_pool_size = 256M
innodb_log_buffer_size = 8M
innodb_file_per_table = 1
innodb_open_files = 400
innodb_io_capacity = 400
innodb_flush_method = O_DIRECT
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem
#
# * Galera-related settings
#
[galera]
# Mandatory settings
#wsrep_on=ON
#wsrep_provider=
#wsrep_cluster_address=
#binlog_format=row
#default_storage_engine=InnoDB
#innodb_autoinc_lock_mode=2
#
# Allow server to accept connections on all interfaces.
#
#bind-address=0.0.0.0
#
# Optional setting
#wsrep_slave_threads=1
#innodb_flush_log_at_trx_commit=0
[mysqldump]
quick
quote-names
max_allowed_packet = 16M
[mysql]
#no-auto-rehash # faster start of mysql but no tab completion
[isamchk]
key_buffer = 16M
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/my.cnf.d/
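
As the header comment notes, the server itself can report how it would interpret this file; these are standard mysqld/mariadb flags:

    mysqld --print-defaults   # show the option values this my.cnf supplies
    mysqld --help --verbose   # list every option the server understands, with effective values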


@@ -13,75 +13,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
source $PWD/db/util/mysql.sh
source $PWD/db/util/pgsql.sh
source $PWD/db/util/mysql_pgsql_1_5_0.sh
source $PWD/db/util/alembic.sh
set -e
ISMYSQL=false
ISPGSQL=false
ISNOTARY=false
ISCLAIR=false
cur_version=""
PGSQL_USR="postgres"
function init {
if [ "$(ls -A /var/lib/mysql)" ]; then
# As the data is migrated to pgsql after the first successful run,
# PG_VERSION should be in /var/lib/mysql if the user repeats the UP command.
if [ -e '/var/lib/mysql/PG_VERSION' ]; then
ISPGSQL=true
elif [ -d '/var/lib/mysql/mysql' ]; then
ISMYSQL=true
if [ -d '/var/lib/mysql/notaryserver' ]; then
ISNOTARY=true
fi
fi
fi
if [ "$(ls -A /var/lib/postgresql/data)" ]; then
ISPGSQL=true
fi
if [ -d "/clair-db" ]; then
ISCLAIR=true
fi
if [ $ISMYSQL == false ] && [ $ISPGSQL == false ]; then
if [ $ISPGSQL == false ]; then
echo "No database has been mounted for the migration. Use '-v' to set it in 'docker run'."
exit 1
fi
if [ $ISMYSQL == true ]; then
# as for UP notary, the user does not need to provide username and pwd.
# the check works for harbor DB only.
if [ $ISNOTARY == false ]; then
if [ -z "$DB_USR" -o -z "$DB_PWD" ]; then
echo "DB_USR or DB_PWD not set, exiting..."
exit 1
fi
launch_mysql $DB_USR $DB_PWD
else
launch_mysql root
fi
fi
if [ $ISPGSQL == true ]; then
if [ $ISCLAIR == true ]; then
launch_pgsql $PGSQL_USR "/clair-db"
else
launch_pgsql $PGSQL_USR
fi
launch_pgsql $PGSQL_USR
fi
}
function get_version {
if [ $ISMYSQL == true ]; then
result=$(get_version_mysql)
fi
if [ $ISPGSQL == true ]; then
result=$(get_version_pgsql $PGSQL_USR)
fi
@@ -99,9 +56,6 @@ function version_le {
function backup {
echo "Performing backup..."
if [ $ISMYSQL == true ]; then
backup_mysql
fi
if [ $ISPGSQL == true ]; then
backup_pgsql
fi
@@ -112,9 +66,6 @@ function backup {
function restore {
echo "Performing restore..."
if [ $ISMYSQL == true ]; then
restore_mysql
fi
if [ $ISPGSQL == true ]; then
restore_pgsql
fi
@@ -125,9 +76,6 @@ function restore {
function validate {
echo "Performing test..."
if [ $ISMYSQL == true ]; then
test_mysql $DB_USR $DB_PWD
fi
if [ $ISPGSQL == true ]; then
test_pgsql $PGSQL_USR
fi
@@ -137,13 +85,7 @@ function validate {
}
function upgrade {
if [ $ISNOTARY == true ];then
up_notary $PGSQL_USR
elif [ $ISCLAIR == true ];then
up_clair $PGSQL_USR
else
up_harbor $1
fi
up_harbor $1
}
function up_harbor {
@@ -159,58 +101,13 @@ function up_harbor {
exit 0
fi
# $cur_version <='1.5.0', $target_version <='1.5.0', it needs to call mysql upgrade.
if version_le $cur_version '1.5.0' && version_le $target_version '1.5.0'; then
if [ $ISMYSQL != true ]; then
echo "Please mount the database volume to /var/lib/mysql, then to run the upgrade again."
return 1
else
alembic_up mysql $target_version
return $?
fi
fi
# $cur_version > '1.5.0', $target_version > '1.5.0', it needs to call pgsql upgrade.
if ! version_le $cur_version '1.5.0' && ! version_le $target_version '1.5.0'; then
if [ $ISPGSQL != true ]; then
echo "Please mount the database volume to /var/lib/postgresql/data, then to run the upgrade again."
return 1
else
alembic_up pgsql $target_version
return $?
fi
fi
# $cur_version <='1.5.0', $target_version >'1.5.0', it needs to upgrade to $cur_version.mysql => 1.5.0.mysql => 1.5.0.pgsql => target_version.pgsql.
if version_le $cur_version '1.5.0' && ! version_le $target_version '1.5.0'; then
if [ $ISMYSQL != true ]; then
echo "Please make sure to mount the correct the data volume."
return 1
else
launch_pgsql $PGSQL_USR
mysql_2_pgsql_1_5_0 $PGSQL_USR
# Pgsql won't run the init scripts, as the migration script has already created PG_VERSION,
# the flag used by pgsql's entrypoint.sh to decide whether to run the init scripts that create the harbor DBs.
# Force-initializing the notary DBs here aligns with the new harbor launch process;
# otherwise the user could hit a DB failure when launching harbor with notary, as no data was created.
psql -U $PGSQL_USR -f /harbor-migration/db/schema/notaryserver_init.pgsql
psql -U $PGSQL_USR -f /harbor-migration/db/schema/notarysigner_init.pgsql
stop_mysql $DB_USR $DB_PWD
## it needs to call the alembic_up to target, disable it as it's now unsupported.
alembic_up pgsql $target_version
stop_pgsql $PGSQL_USR
rm -rf /var/lib/mysql/*
cp -rf $PGDATA/* /var/lib/mysql
## Chmod 700 to DB data directory
chmod 700 /var/lib/mysql
return 0
fi
if [ $ISPGSQL != true ]; then
echo "Please mount the database volume to /var/lib/postgresql/data, then to run the upgrade again."
return 1
else
alembic_up pgsql $target_version
return $?
fi
echo "Unsupported DB upgrade from $cur_version to $target_version, please check the inputs."
@@ -253,4 +150,4 @@ function main {
esac
}
main "$@"
main "$@"


@@ -1,8 +0,0 @@
\c notaryserver;
ALTER TABLE tuf_files OWNER TO server;
ALTER SEQUENCE tuf_files_id_seq OWNER TO server;
ALTER TABLE change_category OWNER TO server;
ALTER TABLE changefeed OWNER TO server;
ALTER SEQUENCE changefeed_id_seq OWNER TO server;
ALTER TABLE schema_migrations OWNER TO server;


@@ -1,39 +0,0 @@
\c notaryserver;
CREATE TABLE "tuf_files" (
"id" serial PRIMARY KEY,
"created_at" timestamp NULL DEFAULT NULL,
"updated_at" timestamp NULL DEFAULT NULL,
"deleted_at" timestamp NULL DEFAULT NULL,
"gun" varchar(255) NOT NULL,
"role" varchar(255) NOT NULL,
"version" integer NOT NULL,
"data" bytea NOT NULL,
"sha256" char(64) DEFAULT NULL,
UNIQUE ("gun","role","version")
);
CREATE INDEX tuf_files_sha256_idx ON tuf_files(sha256);
CREATE TABLE "change_category" (
"category" VARCHAR(20) PRIMARY KEY
);
CREATE TABLE "changefeed" (
"id" serial PRIMARY KEY,
"created_at" timestamp DEFAULT CURRENT_TIMESTAMP,
"gun" varchar(255) NOT NULL,
"version" integer NOT NULL,
"sha256" CHAR(64) DEFAULT NULL,
"category" VARCHAR(20) NOT NULL DEFAULT 'update' REFERENCES "change_category"
);
CREATE INDEX "idx_changefeed_gun" ON "changefeed" ("gun");
CREATE TABLE "schema_migrations" (
"version" int PRIMARY KEY
);
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO server;


@@ -1,4 +0,0 @@
CREATE DATABASE notaryserver;
CREATE USER server;
alter user server with encrypted password 'password';
GRANT ALL PRIVILEGES ON DATABASE notaryserver TO server;


@@ -1,5 +0,0 @@
\c notarysigner;
ALTER TABLE private_keys OWNER TO signer;
ALTER SEQUENCE private_keys_id_seq OWNER TO signer;
ALTER TABLE schema_migrations OWNER TO signer;


@@ -1,27 +0,0 @@
\c notarysigner;
CREATE TABLE "private_keys" (
"id" serial PRIMARY KEY,
"created_at" timestamp NULL DEFAULT NULL,
"updated_at" timestamp NULL DEFAULT NULL,
"deleted_at" timestamp NULL DEFAULT NULL,
"key_id" varchar(255) NOT NULL,
"encryption_alg" varchar(255) NOT NULL,
"keywrap_alg" varchar(255) NOT NULL,
"algorithm" varchar(50) NOT NULL,
"passphrase_alias" varchar(50) NOT NULL,
"public" bytea NOT NULL,
"private" bytea NOT NULL,
"gun" varchar(255) NOT NULL,
"role" varchar(255) NOT NULL,
"last_used" timestamp NULL DEFAULT NULL,
CONSTRAINT "key_id" UNIQUE ("key_id"),
CONSTRAINT "key_id_2" UNIQUE ("key_id","algorithm")
);
CREATE TABLE "schema_migrations" (
"version" int PRIMARY KEY
);
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO signer;


@@ -1,4 +0,0 @@
CREATE DATABASE notarysigner;
CREATE USER signer;
alter user signer with encrypted password 'password';
GRANT ALL PRIVILEGES ON DATABASE notarysigner TO signer;
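
These schema files are applied with plain psql; the run.sh hunk above shows the exact calls made while force-initializing the notary DBs during a cross-1.5.0 migration:

    psql -U postgres -f /harbor-migration/db/schema/notaryserver_init.pgsql
    psql -U postgres -f /harbor-migration/db/schema/notarysigner_init.pgsql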


@@ -1,332 +0,0 @@
CREATE DATABASE registry ENCODING 'UTF8';
\c registry;
create table access (
access_id int PRIMARY KEY NOT NULL,
access_code char(1),
comment varchar (30)
);
create table role (
role_id int PRIMARY KEY NOT NULL,
role_mask int DEFAULT 0 NOT NULL,
role_code varchar(20),
name varchar (20)
);
/*
role mask is used for a future enhancement in which a project member can have multiple roles;
currently set to 0
*/
create table harbor_user (
user_id int PRIMARY KEY NOT NULL,
username varchar(255),
email varchar(255),
password varchar(40) NOT NULL,
realname varchar (255) NOT NULL,
comment varchar (30),
deleted smallint DEFAULT 0 NOT NULL,
reset_uuid varchar(40) DEFAULT NULL,
salt varchar(40) DEFAULT NULL,
sysadmin_flag smallint DEFAULT 0 NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
UNIQUE (username),
UNIQUE (email)
);
CREATE FUNCTION update_update_time_at_column() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
NEW.update_time = NOW();
RETURN NEW;
END;
$$;
/*
The triggers for harbor_user and project should be added by alembic pgsql v1.6.0;
putting them here reduces DB operations, so everything is done when the DB is created.
*/
CREATE TRIGGER harbor_user_update_time_at_modtime BEFORE UPDATE ON harbor_user FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table project (
project_id int PRIMARY KEY NOT NULL,
owner_id int NOT NULL,
/*
The max length of name controlled by the API is 30,
and 11 characters are reserved for marking the deleted project.
*/
name varchar (255) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
deleted smallint DEFAULT 0 NOT NULL,
/*
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
*/
UNIQUE (name)
);
CREATE TRIGGER project_update_time_at_modtime BEFORE UPDATE ON project FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table project_member (
id int NOT NULL,
project_id int NOT NULL,
entity_id int NOT NULL,
/*
entity_type indicates the type of member,
u for user, g for user group
*/
entity_type char(1) NOT NULL,
role int NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id),
CONSTRAINT unique_project_entity_type UNIQUE (project_id, entity_id, entity_type)
);
CREATE TRIGGER project_member_update_time_at_modtime BEFORE UPDATE ON project_member FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table project_metadata (
id int NOT NULL,
project_id int NOT NULL,
name varchar(255) NOT NULL,
value varchar(255),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
deleted smallint DEFAULT 0 NOT NULL,
PRIMARY KEY (id),
CONSTRAINT unique_project_id_and_name UNIQUE (project_id,name)
/*
FOREIGN KEY (project_id) REFERENCES project(project_id)
*/
);
CREATE TRIGGER project_metadata_update_time_at_modtime BEFORE UPDATE ON project_metadata FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table user_group (
id int NOT NULL,
group_name varchar(255) NOT NULL,
group_type smallint default 0,
ldap_group_dn varchar(512) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
CREATE TRIGGER user_group_update_time_at_modtime BEFORE UPDATE ON user_group FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table access_log (
log_id int NOT NULL,
username varchar (255) NOT NULL,
project_id int NOT NULL,
repo_name varchar (256),
repo_tag varchar (128),
GUID varchar(64),
operation varchar(20) NOT NULL,
op_time timestamp default CURRENT_TIMESTAMP,
primary key (log_id)
);
CREATE INDEX pid_optime ON access_log (project_id, op_time);
create table repository (
repository_id int NOT NULL,
name varchar(255) NOT NULL,
project_id int NOT NULL,
description text,
pull_count int DEFAULT 0 NOT NULL,
star_count int DEFAULT 0 NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
primary key (repository_id),
UNIQUE (name)
);
CREATE TRIGGER repository_update_time_at_modtime BEFORE UPDATE ON repository FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_policy (
id int NOT NULL,
name varchar(256),
project_id int NOT NULL,
target_id int NOT NULL,
enabled SMALLINT NOT NULL DEFAULT 1,
description text,
deleted SMALLINT DEFAULT 0 NOT NULL,
cron_str varchar(256),
filters varchar(1024),
replicate_deletion SMALLINT DEFAULT 0 NOT NULL,
start_time timestamp NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_policy_update_time_at_modtime BEFORE UPDATE ON replication_policy FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_target (
id int NOT NULL,
name varchar(64),
url varchar(64),
username varchar(255),
password varchar(128),
/*
target_type indicates the type of target registry,
0 means it's a harbor instance,
1 means it's a regular registry
*/
target_type SMALLINT NOT NULL DEFAULT 0,
insecure SMALLINT NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_target_update_time_at_modtime BEFORE UPDATE ON replication_target FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_job (
id int NOT NULL,
status varchar(64) NOT NULL,
policy_id int NOT NULL,
repository varchar(256) NOT NULL,
operation varchar(64) NOT NULL,
tags varchar(16384),
/*
The new job service only records uuid; for compatibility, both IDs are stored in this table.
*/
job_uuid varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
CREATE INDEX policy ON replication_job (policy_id);
CREATE INDEX poid_uptime ON replication_job (policy_id, update_time);
CREATE INDEX poid_status ON replication_job (policy_id, status);
CREATE TRIGGER replication_job_update_time_at_modtime BEFORE UPDATE ON replication_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_immediate_trigger (
id int NOT NULL,
policy_id int NOT NULL,
namespace varchar(256) NOT NULL,
on_push SMALLINT NOT NULL DEFAULT 0,
on_deletion SMALLINT NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_immediate_trigger_update_time_at_modtime BEFORE UPDATE ON replication_immediate_trigger FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_job (
id int NOT NULL,
status varchar(64) NOT NULL,
repository varchar(256) NOT NULL,
tag varchar(128) NOT NULL,
digest varchar(128),
/*
The new job service only records uuid; for compatibility, both IDs are stored in this table.
*/
job_uuid varchar(64),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);
CREATE INDEX idx_status ON img_scan_job (status);
CREATE INDEX idx_digest ON img_scan_job (digest);
CREATE INDEX idx_uuid ON img_scan_job (job_uuid);
CREATE INDEX idx_repository_tag ON img_scan_job (repository,tag);
CREATE TRIGGER img_scan_job_update_time_at_modtime BEFORE UPDATE ON img_scan_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_overview (
id int NOT NULL,
image_digest varchar(128) NOT NULL,
scan_job_id int NOT NULL,
/* 0 indicates none, the higher the number, the more severe the status */
severity int NOT NULL default 0,
/* the json string to store components severity status, currently use a json to be more flexible and avoid creating additional tables. */
components_overview varchar(2048),
/* primary key for querying details, in clair it should be the name of the "top layer" */
details_key varchar(128),
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY(id),
UNIQUE(image_digest)
);
CREATE TRIGGER img_scan_overview_update_time_at_modtime BEFORE UPDATE ON img_scan_overview FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table clair_vuln_timestamp (
id int NOT NULL,
namespace varchar(128) NOT NULL,
last_update timestamp NOT NULL,
PRIMARY KEY(id),
UNIQUE(namespace)
);
create table properties (
id int NOT NULL,
k varchar(64) NOT NULL,
v varchar(128) NOT NULL,
PRIMARY KEY(id),
UNIQUE (k)
);
create table harbor_label (
id int NOT NULL,
name varchar(128) NOT NULL,
description text,
color varchar(16),
/*
's' for system level labels
'u' for user level labels
*/
level char(1) NOT NULL,
/*
'g' for global labels
'p' for project labels
*/
scope char(1) NOT NULL,
project_id int,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY(id),
CONSTRAINT unique_name_and_scope UNIQUE (name,scope,project_id)
);
CREATE TRIGGER harbor_label_update_time_at_modtime BEFORE UPDATE ON harbor_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table harbor_resource_label (
id int NOT NULL,
label_id int NOT NULL,
/*
the resource_id is the ID of project when the resource_type is p
the resource_id is the ID of repository when the resource_type is r
*/
resource_id int,
/*
the resource_name is the name of image when the resource_type is i
*/
resource_name varchar(256),
/*
'p' for project
'r' for repository
'i' for image
*/
resource_type char(1) NOT NULL,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP,
PRIMARY KEY(id),
CONSTRAINT unique_label_resource UNIQUE (label_id,resource_id, resource_name, resource_type)
);
CREATE TRIGGER harbor_resource_label_update_time_at_modtime BEFORE UPDATE ON harbor_resource_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
CREATE TABLE IF NOT EXISTS alembic_version (
version_num varchar(32) NOT NULL
);
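
The update_update_time_at_column() trigger family above keeps update_time current without any application code. A hedged smoke test, assuming a scratch database created from this schema:

    psql -U postgres -d registry -c "INSERT INTO repository (repository_id, name, project_id) VALUES (1, 'library/demo', 1);"
    psql -U postgres -d registry -c "UPDATE repository SET pull_count = pull_count + 1 WHERE repository_id = 1;"
    # update_time now exceeds creation_time, set by the BEFORE UPDATE trigger
    psql -U postgres -d registry -c "SELECT creation_time, update_time FROM repository WHERE repository_id = 1;"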


@@ -1,129 +0,0 @@
#!/usr/bin/python
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script exports the existing Harbor projects into a file.
It is used only for the VIC 1.2 migration.
"""
import json
from optparse import OptionParser
import os
import MySQLdb
import sys
class Parameters(object):
def __init__(self):
self.dbuser = ''
self.dbpwd = ''
self.exportpath = ''
self.init_from_input()
@staticmethod
def parse_input():
usage = "usage: %prog [options] <dbuser> <dbpwd> <exportpath>"
parser = OptionParser(usage)
parser.add_option("-u", "--dbuser", dest="dbuser", help="db user")
parser.add_option("-p", "--dbpwd", dest="dbpwd", help="db password")
parser.add_option("-o", "--exportpath", dest="exportpath", help="the path of exported json file")
(options, args) = parser.parse_args()
return (options.dbuser, options.dbpwd, options.exportpath)
def init_from_input(self):
(self.dbuser, self.dbpwd, self.exportpath) = Parameters.parse_input()
class Project:
def __init__(self, project_id, name, public):
self.project_id = project_id
self.project_name = name
self.public = "true" if public == 1 else "false"
class HarborUtil:
def __init__(self, dbuser, dbpwd):
self.serverName = 'localhost'
self.user = dbuser
self.password = dbpwd
self.port = '3306'
self.subDB = 'registry'
self.db = None
self.cursor = None
def connect(self):
try:
self.db = MySQLdb.connect(host=self.serverName, user=self.user,
passwd=self.password, db=self.subDB)
self.cursor = self.db.cursor()
except Exception, e:
raise Exception(e)
def close(self):
try:
self.cursor.close()
self.db.close()
except Exception, e:
print str(e)
def get_projects(self):
projects = []
try:
query = "SELECT project_id, name, public from registry.project where deleted=0"
self.cursor.execute(query)
for result in self.cursor.fetchall():
projects.append(Project(int(result[0]), result[1], result[2]))
return projects
except Exception, e:
raise Exception(e)
def delfile(src):
if not os.path.exists(src):
return
try:
os.remove(src)
except Exception, e:
raise Exception("unable to delete file: %s, error: %s" % (src, str(e)))
def main():
commandline_input = Parameters()
harbor = HarborUtil(commandline_input.dbuser, commandline_input.dbpwd)
try:
harbor.connect()
projects = harbor.get_projects()
if len(projects) == 0:
return
harbor_projects_json = commandline_input.exportpath + '/harbor_projects.json'
delfile(harbor_projects_json)
with open(harbor_projects_json, 'w') as outfile:
json.dump({'projects': [project.__dict__ for project in projects]}, outfile, sort_keys=True, indent=4)
except Exception, e:
print e
sys.exit(1)
finally:
harbor.close()
if __name__ == '__main__':
main()
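For reference, the harbor_projects.json written by this script has the following shape (a hypothetical single-project example; field values are illustrative, and sort_keys orders the keys alphabetically):

{
    "projects": [
        {
            "project_id": 1,
            "project_name": "library",
            "public": "true"
        }
    ]
}

The import script below consumes exactly these fields for each entry.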

View File

@ -1,127 +0,0 @@
#!/usr/bin/python
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script imports the projects from the source file into Admiral and saves the results into mapprojectsfile.
It is used only for the VIC 1.2 migration.
"""
import json
from optparse import OptionParser
import os
import urllib2, ssl
import sys
import logging
import logging.config
logging.basicConfig(filename="import_project.log", level=logging.INFO)
logger = logging.getLogger()
class Parameters(object):
def __init__(self):
self.admiral_endpoint = ''
self.tokenfile = ''
self.projectsfile = ''
self.mapprojectsfile = ''
self.init_from_input()
@staticmethod
def parse_input():
usage = "usage: %prog [options] <admiralendpoint> <tokenfile> <projectsfile>"
parser = OptionParser(usage)
parser.add_option("-a", "--admiralendpoint", dest="admiral_endpoint", help="admiral endpoint")
parser.add_option("-t", "--tokenfile", dest="tokenfile", help="the path of token file")
parser.add_option("-f", "--projectsfile", dest="projectsfile", help="the path of exported json file")
parser.add_option("-m", "--mapprojectsfile", dest="mapprojectsfile", help="the path of output projects file for mapping project id")
(options, args) = parser.parse_args()
return (options.admiral_endpoint, options.tokenfile, options.projectsfile, options.mapprojectsfile)
def init_from_input(self):
(self.admiral_endpoint, self.tokenfile, self.projectsfile, self.mapprojectsfile) = Parameters.parse_input()
class Project:
def __init__(self, project_id, name, public):
self.project_id = project_id
self.project_name = name
self.public = public
self.index_id = ''
class Admiral:
def __init__(self, admiral_url, token):
self.admiral_url = admiral_url + '/projects'
self.token = token
def __import_project(self, project, retry=True):
project_data = json.dumps({ "name": project.project_name, "isPublic": project.public,
"customProperties": {"__enableContentTrust": False, "__preventVulnerableImagesFromRunning":False,
"__preventVulnerableImagesFromRunningSeverity":"high", "__automaticallyScanImagesOnPush":False }})
data_len = len(project_data)
request = urllib2.Request(self.admiral_url, project_data)
request.add_header('x-xenon-auth-token', self.token)
request.add_header('Content-Type', 'application/json')
request.add_header('Content-Length', data_len)
try:
response = urllib2.urlopen(request, context=ssl._create_unverified_context())
response_obj = response.read()
project.index_id = json.loads(response_obj)['customProperties']['__projectIndex']
except Exception, e:
if not retry:
logger.error("failed to import project: %s, admiral_endpoint: %s, error: %s " % (project.project_name, self.admiral_url, str(e)))
return
self.__import_project(project, False)
def import_project(self, projects):
for project in projects:
self.__import_project(project)
def main():
commandline_input = Parameters()
try:
if not os.path.exists(commandline_input.projectsfile):
raise Exception('Error: %s does not exist' % commandline_input.projectsfile)
if not os.path.exists(commandline_input.tokenfile):
raise Exception('Error: %s does not exist' % commandline_input.tokenfile)
with open(commandline_input.tokenfile, 'r') as f:
token = f.readlines()
if len(token) == 0:
raise Exception('No token found in the properties file %s' % commandline_input.tokenfile)
admiral = Admiral(commandline_input.admiral_endpoint, token[0])
with open(commandline_input.projectsfile, 'r') as project_data_file:
project_data = json.load(project_data_file)
projects_import_list = []
for item in project_data['projects']:
projects_import_list.append(Project(item['project_id'], item['project_name'], item['public']))
admiral.import_project(projects_import_list)
with open(commandline_input.mapprojectsfile, 'w') as outfile:
json.dump({'map_projects': [project.__dict__ for project in projects_import_list]}, outfile, sort_keys=True, indent=4)
except Exception, e:
logger.error("failed to import project, admiral_endpoint: %s, error: %s " % (commandline_input.admiral_endpoint, str(e)))
sys.exit(1)
if __name__ == '__main__':
main()
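Similarly, the mapprojectsfile written by this script records the Admiral index_id assigned to each imported project (again a hypothetical example; the index_id value is illustrative):

{
    "map_projects": [
        {
            "index_id": "42",
            "project_id": 1,
            "project_name": "library",
            "public": "true"
        }
    ]
}

The mapping script below reads project_id and index_id from these entries to rewrite the Harbor tables.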

View File

@ -1,247 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script maps the project IDs between Harbor and Admiral; it is only for the VIC 1.2 migration.
In VIC 1.2, projects are managed by Admiral rather than Harbor, so as part of the migration
the project IDs in Admiral and Harbor need to be unified.
"""
import json
from optparse import OptionParser
import MySQLdb
import sys
class Parameters(object):
def __init__(self):
self.dbuser = ''
self.dbpwd = ''
self.mapprojectsfile = ''
self.init_from_input()
@staticmethod
def parse_input():
usage = \
'usage: %prog [options] <dbuser> <dbpwd> <mapprojectsfile>'
parser = OptionParser(usage)
parser.add_option('-u', '--dbuser', dest='dbuser',
help='db user')
parser.add_option('-p', '--dbpwd', dest='dbpwd',
help='db password')
parser.add_option('-m', '--mapprojectsfile',
dest='mapprojectsfile',
help='the path of mapping projects file')
(options, args) = parser.parse_args()
return (options.dbuser, options.dbpwd, options.mapprojectsfile)
def init_from_input(self):
(self.dbuser, self.dbpwd, self.mapprojectsfile) = \
Parameters.parse_input()
class AccessLog:
def __init__(self, log_id, project_id):
self.log_id = log_id
self.project_id = project_id
class Repository:
def __init__(self, repository_id, project_id):
self.repository_id = repository_id
self.project_id = project_id
class ReplicationPolicy:
def __init__(self, replication_policy_id, project_id):
self.replication_policy_id = replication_policy_id
self.project_id = project_id
class Project:
def __init__(
self,
project_id,
name,
index_id,
):
self.project_id = project_id
self.project_name = name
self.index_id = index_id
class HarborUtil:
def __init__(self, dbuser, dbpwd):
self.serverName = 'localhost'
self.user = dbuser
self.password = dbpwd
self.port = '3306'
self.subDB = 'registry'
self.db = None
self.cursor = None
def connect(self):
try:
self.db = MySQLdb.connect(host=self.serverName,
user=self.user, passwd=self.password, db=self.subDB)
self.db.autocommit(False)
self.cursor = self.db.cursor()
except Exception, e:
raise Exception(e)
def close(self):
try:
self.db.commit()
self.cursor.close()
self.db.close()
except Exception, e:
print str(e)
def enable_foreign_key_check(self):
try:
self.cursor.execute('SET FOREIGN_KEY_CHECKS=1')
except Exception, e:
print str(e)
def disable_foreign_key_check(self):
try:
self.cursor.execute('SET FOREIGN_KEY_CHECKS=0')
except Exception, e:
print str(e)
def get_index_id(self, projects, project_id):
for project in projects:
if project.project_id == project_id:
return project.index_id
return ''
def update_access_log_table(self, projects):
access_logs = []
try:
query_access_log = \
'SELECT log_id, project_id from registry.access_log'
self.cursor.execute(query_access_log)
for result in self.cursor.fetchall():
access_logs.append(AccessLog(result[0], result[1]))
except Exception, e:
raise Exception(e)
for item in access_logs:
index_id = self.get_index_id(projects, item.project_id)
if index_id != '':
try:
update_access_log_project_id = \
'UPDATE registry.access_log SET project_id=%s where log_id=%s' \
% (index_id, item.log_id)
self.cursor.execute(update_access_log_project_id)
except Exception, e:
raise Exception(e)
def update_repository_table(self, projects):
repositories = []
try:
query_repository = \
'SELECT repository_id, project_id from registry.repository'
self.cursor.execute(query_repository)
for result in self.cursor.fetchall():
repositories.append(Repository(result[0], result[1]))
except Exception, e:
raise Exception(e)
for item in repositories:
index_id = self.get_index_id(projects, item.project_id)
if index_id != '':
try:
update_repository_project_id = \
'UPDATE registry.repository SET project_id=%s where repository_id=%s' \
% (index_id, item.repository_id)
self.cursor.execute(update_repository_project_id)
except Exception, e:
raise Exception(e)
def update_replication_policy_table(self, projects):
replication_policies = []
try:
query_replication_policy = \
'SELECT id, project_id from registry.replication_policy'
self.cursor.execute(query_replication_policy)
for result in self.cursor.fetchall():
replication_policies.append(ReplicationPolicy(result[0],
result[1]))
except Exception, e:
raise Exception(e)
for item in replication_policies:
index_id = self.get_index_id(projects, item.project_id)
if index_id != '':
try:
update_replication_policy_id = \
'UPDATE registry.replication_policy SET project_id=%s where id=%s' \
% (index_id, item.replication_policy_id)
self.cursor.execute(update_replication_policy_id)
except Exception, e:
raise Exception(e)
def main():
commandline_input = Parameters()
harbor = HarborUtil(commandline_input.dbuser,
commandline_input.dbpwd)
try:
harbor.connect()
harbor.disable_foreign_key_check()
with open(commandline_input.mapprojectsfile, 'r') as \
project_mapping_file:
project_mapping_data = json.load(project_mapping_file)
projects_mapping_list = []
for item in project_mapping_data['map_projects']:
projects_mapping_list.append(Project(item['project_id'],
item['project_name'], item['index_id']))
harbor.update_access_log_table(projects_mapping_list)
harbor.update_repository_table(projects_mapping_list)
harbor.update_replication_policy_table(projects_mapping_list)
except Exception, e:
print e
sys.exit(1)
finally:
harbor.enable_foreign_key_check()
harbor.close()
if __name__ == '__main__':
main()
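Taken together, the three VIC scripts form a pipeline: export the Harbor projects, import them into Admiral, then rewrite Harbor's tables with the Admiral index IDs. A hypothetical invocation (the script filenames are illustrative; the flags come from each script's option parser):

python export_harbor_projects.py -u root -p secret -o /tmp
python import_vic_projects.py -a https://admiral.local -t token.properties -f /tmp/harbor_projects.json -m /tmp/harbor_map_projects.json
python map_vic_projects.py -u root -p secret -m /tmp/harbor_map_projects.json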

View File

@ -20,14 +20,7 @@ function alembic_up {
local db_type="$1"
local target_version="$2"
if [ $db_type = "mysql" ]; then
export PYTHONPATH=/harbor-migration/db/alembic/mysql
source /harbor-migration/db/alembic/mysql/alembic.tpl > /harbor-migration/db/alembic/mysql/alembic.ini
echo "Performing upgrade $target_version..."
alembic -c /harbor-migration/db/alembic/mysql/alembic.ini current
alembic -c /harbor-migration/db/alembic/mysql/alembic.ini upgrade $target_version
alembic -c /harbor-migration/db/alembic/mysql/alembic.ini current
elif [ $db_type = "pgsql" ]; then
if [ $db_type = "pgsql" ]; then
export PYTHONPATH=/harbor-migration/db/alembic/postgres
echo "TODO: add support for pgsql."
source /harbor-migration/db/alembic/postgres/alembic.tpl > /harbor-migration/db/alembic/postgres/alembic.ini
@ -36,9 +29,9 @@ function alembic_up {
alembic -c /harbor-migration/db/alembic/postgres/alembic.ini upgrade $target_version
alembic -c /harbor-migration/db/alembic/postgres/alembic.ini current
else
echo "Unsupported DB type."
echo "Unsupported DB type: $db_type"
exit 1
fi
echo "Upgrade performed."
}
}

View File

@ -1,105 +0,0 @@
#!/bin/bash
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
DBCNF="-hlocalhost -u${DB_USR}"
function launch_mysql {
set +e
local usr="$1"
local pwd="$2"
if [ ! -z "$pwd" ]; then
export MYSQL_PWD="$pwd"
fi
echo 'Trying to start mysql server...'
chown -R 10000:10000 /var/lib/mysql
mysqld &
echo 'Waiting for MySQL to start...'
for i in {60..0}; do
if [ -z "$pwd" ]; then
mysqladmin -u$usr processlist >/dev/null 2>&1
else
mysqladmin -u$usr -p$pwd processlist >/dev/null 2>&1
fi
if [ $? -eq 0 ]; then
break
fi
sleep 1
done
set -e
if [ "$i" -eq 0 ]; then
echo "timeout. Can't run mysql server."
return 1
fi
return 0
}
function test_mysql {
set +e
launch_mysql $DB_USR $DB_PWD
if [ $? -eq 0 ]; then
echo "DB test success."
exit 0
else
echo "DB test failed."
exit 1
fi
set -e
}
function stop_mysql {
if [ -z "$2" ]; then
mysqladmin -u$1 shutdown
else
mysqladmin -u$1 -p$DB_PWD shutdown
fi
sleep 1
}
function get_version_mysql {
local cur_version=""
set +e
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='alembic_version';") -eq 0 ]]; then
echo "table alembic_version does not exist. Trying to initial alembic_version."
mysql $DBCNF < /harbor-migration/db/alembic/mysql/alembic.sql
# compatible with versions 0.1.0 and 0.1.1
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='properties'") -eq 0 ]]; then
echo "table properties does not exist. The version of registry is 0.1.0"
cur_version='0.1.0'
else
echo "The version of registry is 0.1.1"
mysql $DBCNF -e "insert into registry.alembic_version values ('0.1.1')"
cur_version='0.1.1'
fi
else
cur_version=$(mysql $DBCNF -N -s -e "select * from registry.alembic_version;")
fi
set -e
echo $cur_version
}
# Only for the registry DB; leverages the code from 1.5.0.
function backup_mysql {
mysqldump $DBCNF --add-drop-database --databases registry > /harbor-migration/backup/registry.sql
}
# Only for the registry DB; leverages the code from 1.5.0.
function restore_mysql {
mysql $DBCNF < /harbor-migration/backup/registry.sql
}

View File

@ -1,117 +0,0 @@
#!/bin/bash
# Copyright Project Harbor Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
source $PWD/db/util/pgsql.sh
source $PWD/db/util/alembic.sh
set -e
DBCNF="-hlocalhost -u${DB_USR}"
function mysql_2_pgsql_1_5_0 {
alembic_up mysql '1.5.0'
## dump 1.5.0-mysql
mysqldump --compatible=postgresql --no-create-info --complete-insert --default-character-set=utf8 --databases registry > /harbor-migration/db/schema/registry.mysql
## migrate 1.5.0-mysql to 1.5.0-pgsql.
python /harbor-migration/db/util/mysql_pgsql_data_converter.py /harbor-migration/db/schema/registry.mysql /harbor-migration/db/schema/registry_insert_data.pgsql
## import 1.5.0-pgsql into pgsql.
psql -U $1 -f /harbor-migration/db/schema/registry_create_tables.pgsql
psql -U $1 -f /harbor-migration/db/schema/registry_insert_data.pgsql
}
# This function is only for versions <= 1.5.0, migrating the notary db from mysql to pgsql.
function up_notary {
set +e
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='notaryserver' and table_name='tuf_files'") -eq 0 ]]; then
echo "no content trust data needs to be updated."
return 0
else
## the notary db is not clean, so the create-tables step cannot be executed.
## fail here and ask the user to clean the DB tables, then rerun the notary db migration.
if [[ $(psql -U $1 -d notaryserver -t -c "select count(*) from pg_tables where schemaname='public';") -ne 0 ]]; then
cat >&2 <<-EOF
*******************************************************************************
WARNING: Notary migration is only possible if you have not yet migrated
notary or launched harbor.
If you want to migrate notary data, please delete all the notaryserver
and notarysigner DB tables in pgsql manually first.
*******************************************************************************
EOF
exit 0
fi
set -e
mysqldump --skip-triggers --compact --no-create-info --skip-quote-names --hex-blob --compatible=postgresql --default-character-set=utf8 --databases notaryserver > /harbor-migration/db/schema/notaryserver.mysql.tmp
sed "s/0x\([0-9A-F]*\)/decode('\1','hex')/g" /harbor-migration/db/schema/notaryserver.mysql.tmp > /harbor-migration/db/schema/notaryserver_insert_data.mysql
mysqldump --skip-triggers --compact --no-create-info --skip-quote-names --hex-blob --compatible=postgresql --default-character-set=utf8 --databases notarysigner > /harbor-migration/db/schema/notarysigner.mysql.tmp
sed "s/0x\([0-9A-F]*\)/decode('\1','hex')/g" /harbor-migration/db/schema/notarysigner.mysql.tmp > /harbor-migration/db/schema/notarysigner_insert_data.mysql
python /harbor-migration/db/util/mysql_pgsql_data_converter.py /harbor-migration/db/schema/notaryserver_insert_data.mysql /harbor-migration/db/schema/notaryserver_insert_data.pgsql
python /harbor-migration/db/util/mysql_pgsql_data_converter.py /harbor-migration/db/schema/notarysigner_insert_data.mysql /harbor-migration/db/schema/notarysigner_insert_data.pgsql
# launch_pgsql $PGSQL_USR
psql -U $1 -f /harbor-migration/db/schema/notaryserver_create_tables.pgsql
psql -U $1 -f /harbor-migration/db/schema/notaryserver_insert_data.pgsql
psql -U $1 -f /harbor-migration/db/schema/notaryserver_alter_tables.pgsql
psql -U $1 -f /harbor-migration/db/schema/notarysigner_create_tables.pgsql
psql -U $1 -f /harbor-migration/db/schema/notarysigner_insert_data.pgsql
psql -U $1 -f /harbor-migration/db/schema/notarysigner_alter_tables.pgsql
stop_mysql root
stop_pgsql $1
fi
}
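# For illustration, the sed substitutions above rewrite mysqldump --hex-blob
# literals into pgsql decode() calls; a (hypothetical) dumped value such as
#   INSERT INTO tuf_files VALUES (1,0x1A2B3C);
# becomes
#   INSERT INTO tuf_files VALUES (1,decode('1A2B3C','hex'));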
function up_clair {
# clair DB info: user: 'postgres' database: 'postgres'
set +e
if [[ $(psql -U $1 -d postgres -t -c "select count(*) from vulnerability;") -eq 0 ]]; then
echo "no vulnerability data needs to be updated."
return 0
else
pg_dump -U postgres postgres > /harbor-migration/db/schema/clair.pgsql
stop_pgsql postgres "/clair-db"
# it's harbor DB on pgsql.
launch_pgsql $1
## the clair db is not clean, so the import step cannot be executed.
## fail here and ask the user to clean the DB, then rerun the clair db migration.
if [[ $(psql -U $1 -d postgres -t -c "select count(*) from pg_tables where schemaname='public';") -ne 0 ]]; then
cat >&2 <<-EOF
*******************************************************************************
WARNING: Clair migration is only possible if you have not yet migrated
clair or launched harbor.
If you want to migrate clair data, please delete all the clair DB tables
in pgsql manually first.
*******************************************************************************
EOF
exit 0
fi
set -e
psql -U $1 -f /harbor-migration/db/schema/clair.pgsql
stop_pgsql $1
fi
}

View File

@ -1,161 +0,0 @@
#!/usr/bin/env python
import sys
def convert_registry_db(mysql_dump_file, pgsql_dump_file):
mysql_dump = open(mysql_dump_file)
pgsql_dump = open(pgsql_dump_file, "w")
insert_lines = []
for i, line in enumerate(mysql_dump):
line = line.decode("utf8").strip()
# catch insert
if line.startswith("INSERT INTO"):
# "user" is a reserved word in pgsql, so the table is renamed to harbor_user.
if line.startswith('INSERT INTO "user"'):
insert_lines.append(line.replace('INSERT INTO "user"', 'INSERT INTO "harbor_user"'))
# the pgsql schema defines the column names in lower-case, so change the quoted "GUID" to "guid".
elif line.find('INSERT INTO "access_log" ("log_id", "username", "project_id", "repo_name", "repo_tag", "GUID", "operation", "op_time")') != -1:
line = line.replace('INSERT INTO "access_log" ("log_id", "username", "project_id", "repo_name", "repo_tag", "GUID", "operation", "op_time")',
'INSERT INTO "access_log" ("log_id", "username", "project_id", "repo_name", "repo_tag", "guid", "operation", "op_time")')
insert_lines.append(line)
continue
# pgsql doesn't accept the zero timestamp "0000-00-00 00:00:00"; change it to the minimum value.
elif line.find("0000-00-00 00:00:00") != -1:
line = line.replace("0000-00-00 00:00:00", "0001-01-01 00:00:00")
insert_lines.append(line)
continue
# mysqldump escapes double quotes inside string values with a backslash,
# like {\"kind\":\"Manual\",\"schedule_param\":null}.
# this is by design of mysql, see https://bugs.mysql.com/bug.php?id=65941
# such data could be inserted into pgsql as-is, but harbor api calls would then fail.
elif line.find('\\"') != -1:
line = line.replace('\\"', '"')
insert_lines.append(line)
continue
else:
insert_lines.append(line)
write_database(pgsql_dump, "registry")
write_insert(pgsql_dump, insert_lines)
write_alter_table_bool(pgsql_dump, "harbor_user", "deleted")
write_alter_table_bool(pgsql_dump, "harbor_user", "sysadmin_flag")
write_alter_table_bool(pgsql_dump, "project", "deleted")
write_alter_table_bool(pgsql_dump, "project_metadata", "deleted")
write_alter_table_bool(pgsql_dump, "replication_policy", "enabled", "TRUE")
write_alter_table_bool(pgsql_dump, "replication_policy", "replicate_deletion")
write_alter_table_bool(pgsql_dump, "replication_policy", "deleted")
write_alter_table_bool(pgsql_dump, "replication_target", "insecure")
write_alter_table_bool(pgsql_dump, "replication_immediate_trigger", "on_push")
write_alter_table_bool(pgsql_dump, "replication_immediate_trigger", "on_deletion")
write_foreign_key(pgsql_dump)
write_sequence(pgsql_dump, "harbor_user", "user_id")
write_sequence(pgsql_dump, "project", "project_id")
write_sequence(pgsql_dump, "project_member", "id")
write_sequence(pgsql_dump, "project_metadata", "id")
write_sequence(pgsql_dump, "user_group", "id")
write_sequence(pgsql_dump, "access_log", "log_id")
write_sequence(pgsql_dump, "repository", "repository_id")
write_sequence(pgsql_dump, "replication_policy", "id")
write_sequence(pgsql_dump, "replication_target", "id")
write_sequence(pgsql_dump, "replication_immediate_trigger", "id")
write_sequence(pgsql_dump, "img_scan_job", "id")
write_sequence(pgsql_dump, "img_scan_overview", "id")
write_sequence(pgsql_dump, "clair_vuln_timestamp", "id")
write_sequence(pgsql_dump, "properties", "id")
write_sequence(pgsql_dump, "harbor_label", "id")
write_sequence(pgsql_dump, "harbor_resource_label", "id")
write_sequence(pgsql_dump, "replication_job", "id")
write_sequence(pgsql_dump, "role", "role_id")
def convert_notary_server_db(mysql_dump_file, pgsql_dump_file):
mysql_dump = open(mysql_dump_file)
pgsql_dump = open(pgsql_dump_file, "w")
insert_lines = []
for i, line in enumerate(mysql_dump):
line = line.decode("utf8").strip()
# catch insert
if line.startswith("INSERT INTO"):
if line.find("0000-00-00 00:00:00") != -1:
line = line.replace("0000-00-00 00:00:00", "0001-01-01 00:00:00")
insert_lines.append(line)
continue
else:
insert_lines.append(line)
write_database(pgsql_dump, "notaryserver")
write_insert(pgsql_dump, insert_lines)
write_sequence(pgsql_dump, "tuf_files", "id")
write_sequence(pgsql_dump, "changefeed", "id")
def convert_notary_signer_db(mysql_dump_file, pgsql_dump_file):
mysql_dump = open(mysql_dump_file)
pgsql_dump = open(pgsql_dump_file, "w")
insert_lines = []
for i, line in enumerate(mysql_dump):
line = line.decode("utf8").strip()
# catch insert
if line.startswith("INSERT INTO"):
if line.find("0000-00-00 00:00:00") != -1:
line = line.replace("0000-00-00 00:00:00", "0001-01-01 00:00:00")
insert_lines.append(line)
continue
else:
insert_lines.append(line)
write_database(pgsql_dump, "notarysigner")
write_insert(pgsql_dump, insert_lines)
write_sequence(pgsql_dump, "private_keys", "id")
def write_database(pgsql_dump, db_name):
pgsql_dump.write("\\c %s;\n" % db_name)
def write_table(pgsql_dump, table_lines):
for item in table_lines:
pgsql_dump.write("%s\n" % item)
if item.startswith(');'):
pgsql_dump.write('\n')
pgsql_dump.write('\n')
def write_insert(pgsql_dump, insert_lines):
for item in insert_lines:
pgsql_dump.write("%s\n" % item.encode('utf-8'))
def write_foreign_key(pgsql_dump):
pgsql_dump.write('\n')
pgsql_dump.write("%s\n" % "ALTER TABLE \"project\" ADD CONSTRAINT \"project_ibfk_1\" FOREIGN KEY (\"owner_id\") REFERENCES \"harbor_user\" (\"user_id\");")
pgsql_dump.write("%s\n" % "ALTER TABLE \"project_metadata\" ADD CONSTRAINT \"project_metadata_ibfk_1\" FOREIGN KEY (\"project_id\") REFERENCES \"project\" (\"project_id\");")
def write_alter_table_bool(pgsql_dump, table_name, table_column, default_value="FALSE"):
pgsql_dump.write('\n')
pgsql_dump.write("ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT;\n" % (table_name, table_column))
pgsql_dump.write("ALTER TABLE %s ALTER %s TYPE bool USING CASE WHEN %s=0 THEN FALSE ELSE TRUE END;\n" % (table_name, table_column, table_column))
pgsql_dump.write("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s;\n" % (table_name, table_column, default_value))
def write_sequence(pgsql_dump, table_name, table_column):
pgsql_dump.write('\n')
pgsql_dump.write("CREATE SEQUENCE IF NOT EXISTS %s_%s_seq;\n" % (table_name, table_column))
pgsql_dump.write("SELECT setval('%s_%s_seq', max(%s)) FROM %s;\n" % (table_name, table_column, table_column, table_name))
pgsql_dump.write("ALTER TABLE \"%s\" ALTER COLUMN \"%s\" SET DEFAULT nextval('%s_%s_seq');\n" % (table_name, table_column, table_name, table_column))
if __name__ == "__main__":
if sys.argv[1].find("registry") != -1:
convert_registry_db(sys.argv[1], sys.argv[2])
elif sys.argv[1].find("notaryserver") != -1:
convert_notary_server_db(sys.argv[1], sys.argv[2])
elif sys.argv[1].find("notarysigner") != -1:
convert_notary_signer_db(sys.argv[1], sys.argv[2])
else:
print ("Unsupport mysql dump file, %s" % sys.argv[1])
sys.exit(1)
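For a concrete sense of the generated statements, the write_alter_table_bool and write_sequence helpers above emit SQL like the following for the harbor_user table (reconstructed from the format strings; setval computes the real max at import time):

ALTER TABLE harbor_user ALTER COLUMN deleted DROP DEFAULT;
ALTER TABLE harbor_user ALTER deleted TYPE bool USING CASE WHEN deleted=0 THEN FALSE ELSE TRUE END;
ALTER TABLE harbor_user ALTER COLUMN deleted SET DEFAULT FALSE;

CREATE SEQUENCE IF NOT EXISTS harbor_user_user_id_seq;
SELECT setval('harbor_user_user_id_seq', max(user_id)) FROM harbor_user;
ALTER TABLE "harbor_user" ALTER COLUMN "user_id" SET DEFAULT nextval('harbor_user_user_id_seq');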