Update migrator to 1.6.0

1. Add new alembic_pg folder for postgres
2. Add migration file for 1.6.0
3. Update version to 1.6.0
4. update migrator dockerfile
This commit is contained in:
Deng, Qian 2018-07-02 21:23:47 +08:00
parent e705224b3f
commit edbe2fe620
29 changed files with 575 additions and 19 deletions

View File

@ -102,7 +102,7 @@ NOTARYVERSION=v0.5.1
MARIADBVERSION=$(VERSIONTAG) MARIADBVERSION=$(VERSIONTAG)
CLAIRVERSION=v2.0.1 CLAIRVERSION=v2.0.1
CLAIRDBVERSION=$(VERSIONTAG) CLAIRDBVERSION=$(VERSIONTAG)
MIGRATORVERSION=v1.5.0 MIGRATORVERSION=v1.6.0
REDISVERSION=$(VERSIONTAG) REDISVERSION=$(VERSIONTAG)
#clarity parameters #clarity parameters

View File

@ -341,5 +341,5 @@ CREATE TABLE IF NOT EXISTS alembic_version (
version_num varchar(32) NOT NULL version_num varchar(32) NOT NULL
); );
insert into alembic_version values ('1.5.0'); insert into alembic_version values ('1.6.0');

View File

@ -20,7 +20,7 @@ import (
const ( const (
// SchemaVersion is the version of database schema // SchemaVersion is the version of database schema
SchemaVersion = "1.5.0" SchemaVersion = "1.6.0"
) )
// GetSchemaVersion return the version of database schema // GetSchemaVersion return the version of database schema

View File

@ -1,6 +1,6 @@
{ {
"name": "harbor-ui", "name": "harbor-ui",
"version": "0.7.19-dev.8", "version": "0.7.19-dev.9",
"description": "Harbor shared UI components based on Clarity and Angular4", "description": "Harbor shared UI components based on Clarity and Angular4",
"author": "VMware", "author": "VMware",
"module": "index.js", "module": "index.js",

View File

@ -49,7 +49,7 @@
"bootstrap": "4.0.0-alpha.5", "bootstrap": "4.0.0-alpha.5",
"codelyzer": "~2.0.0-beta.4", "codelyzer": "~2.0.0-beta.4",
"enhanced-resolve": "^3.0.0", "enhanced-resolve": "^3.0.0",
"harbor-ui": "0.7.19-test-16", "harbor-ui": "0.7.19-dev.9",
"jasmine-core": "2.4.1", "jasmine-core": "2.4.1",
"jasmine-spec-reporter": "2.5.0", "jasmine-spec-reporter": "2.5.0",
"karma": "~1.7.0", "karma": "~1.7.0",

View File

@ -8,7 +8,7 @@ RUN tdnf distro-sync -y || echo \
&& groupadd -r -g 10000 mysql && useradd --no-log-init -r -g 10000 -u 10000 mysql \ && groupadd -r -g 10000 mysql && useradd --no-log-init -r -g 10000 -u 10000 mysql \
&& tdnf install -y mariadb-server mariadb mariadb-devel python2 python2-devel python-pip gcc \ && tdnf install -y mariadb-server mariadb mariadb-devel python2 python2-devel python-pip gcc \
linux-api-headers glibc-devel binutils zlib-devel openssl-devel postgresql >> /dev/null\ linux-api-headers glibc-devel binutils zlib-devel openssl-devel postgresql >> /dev/null\
&& pip install mysqlclient alembic \ && pip install mysqlclient alembic psycopg2 \
&& mkdir /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \ && mkdir /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& rm -fr /var/lib/mysql \ && rm -fr /var/lib/mysql \
&& mkdir -p /var/lib/mysql /var/run/mysqld \ && mkdir -p /var/lib/mysql /var/run/mysqld \

View File

@ -13,7 +13,7 @@ import shutil
import sys import sys
def main(): def main():
target_version = '1.5.0' target_version = '1.6.0'
parser = argparse.ArgumentParser(description='migrator of harbor.cfg') parser = argparse.ArgumentParser(description='migrator of harbor.cfg')
parser.add_argument('--input', '-i', action="store", dest='input_path', required=True, help='The path to the old harbor.cfg that provides input value, this required value') parser.add_argument('--input', '-i', action="store", dest='input_path', required=True, help='The path to the old harbor.cfg that provides input value, this required value')
parser.add_argument('--output','-o', action="store", dest='output_path', required=False, help='The path of the migrated harbor.cfg, if not set the input file will be overwritten') parser.add_argument('--output','-o', action="store", dest='output_path', required=False, help='The path of the migrated harbor.cfg, if not set the input file will be overwritten')

View File

@ -3,7 +3,7 @@ echo "
[alembic] [alembic]
# path to migration scripts # path to migration scripts
script_location = /harbor-migration/db/alembic/migration_harbor script_location = /harbor-migration/db/alembic/mysql/migration_harbor
# template used to generate migration files # template used to generate migration files
# file_template = %%(rev)s_%%(slug)s # file_template = %%(rev)s_%%(slug)s

View File

@ -265,7 +265,7 @@ class HarborLabel(Base):
class HarborResourceLabel(Base): class HarborResourceLabel(Base):
__tablename__ = 'harbor_resource_label' __tablename__ = 'harbor_resource_label'
id = sa.Column(sa.Integer, nullable=False, primary_key=True) id = sa.Column(sa.Integer, primary_key=True)
label_id = sa.Column(sa.Integer, nullable=False) label_id = sa.Column(sa.Integer, nullable=False)
resource_id = sa.Column(sa.Integer) resource_id = sa.Column(sa.Integer)
resource_name = sa.Column(sa.String(256)) resource_name = sa.Column(sa.String(256))

View File

@ -0,0 +1,75 @@
# alembic.tpl (postgres): rendered with `source alembic.tpl > alembic.ini` so
# that $PGSQL_USR / $DB_PWD are expanded by the shell into the generated
# alembic configuration. Do not add comments inside the quoted string below --
# they would end up in the generated .ini verbatim.
echo "
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = /harbor-migration/db/alembic/postgres/migration_harbor
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to migration_harbor/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat migration_harbor/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = postgresql://$PGSQL_USR:$DB_PWD@localhost:5432/registry
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S"

View File

@ -0,0 +1,284 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import func
import datetime
# Declarative base shared by every model class in this module; its metadata
# describes the full Harbor registry schema for the postgres migration path.
Base = declarative_base()
class User(Base):
    """Harbor user account (table `harbor_user`; renamed from `user`,
    presumably because `user` is reserved in PostgreSQL -- TODO confirm)."""
    __tablename__ = 'harbor_user'
    user_id = sa.Column(sa.Integer, primary_key=True)
    username = sa.Column(sa.String(255), unique=True)
    email = sa.Column(sa.String(255), unique=True)
    password = sa.Column(sa.String(40), nullable=False)
    realname = sa.Column(sa.String(255), nullable=False)
    comment = sa.Column(sa.String(30))
    # soft-delete flag; rows are never physically removed
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    reset_uuid = sa.Column(sa.String(40))
    salt = sa.Column(sa.String(40))
    sysadmin_flag = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP)
    update_time = sa.Column(sa.TIMESTAMP)
class UserGroup(Base):
    """Group of users, e.g. imported from LDAP (table `user_group`)."""
    __tablename__ = 'user_group'
    id = sa.Column(sa.Integer, primary_key=True)
    group_name = sa.Column(sa.String(255), nullable = False)
    # group_type: small-int discriminator; semantics not visible here -- TODO confirm
    group_type = sa.Column(sa.SmallInteger, server_default=sa.text("'0'"))
    ldap_group_dn = sa.Column(sa.String(512), nullable=False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
class Properties(Base):
    """Simple key/value store for system configuration (table `properties`)."""
    __tablename__ = 'properties'
    id = sa.Column(sa.Integer, primary_key=True)
    k = sa.Column(sa.String(64), unique=True)
    v = sa.Column(sa.String(128), nullable = False)
class ProjectMember(Base):
    """Membership grant of an entity in a project (table `project_member`)."""
    __tablename__ = 'project_member'
    id = sa.Column(sa.Integer, primary_key=True)
    project_id = sa.Column(sa.Integer(), nullable=False)
    entity_id = sa.Column(sa.Integer(), nullable=False)
    # one-char discriminator; presumably user vs. group -- verify against caller
    entity_type = sa.Column(sa.String(1), nullable=False)
    role = sa.Column(sa.Integer(), nullable = False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    __table_args__ = (sa.UniqueConstraint('project_id', 'entity_id', 'entity_type', name='unique_name_and_scope'),)
class UserProjectRole(Base):
    """Legacy user-to-project-role link table (`user_project_role`),
    kept only so very old schemas can be migrated forward."""
    __tablename__ = 'user_project_role'

    upr_id = sa.Column(sa.Integer(), primary_key=True)
    # Fix: in this postgres schema the users table is named 'harbor_user'
    # (see User above); the original FK pointed at a nonexistent 'user'
    # table and would fail when the metadata is applied.
    user_id = sa.Column(sa.Integer(), sa.ForeignKey('harbor_user.user_id'))
    pr_id = sa.Column(sa.Integer(), sa.ForeignKey('project_role.pr_id'))
    project_role = relationship("ProjectRole")
class ProjectRole(Base):
    """Legacy (project, role) pair table (`project_role`), referenced by
    UserProjectRole; kept only for forward migration of old schemas."""
    __tablename__ = 'project_role'

    pr_id = sa.Column(sa.Integer(), primary_key=True)
    project_id = sa.Column(sa.Integer(), nullable=False)
    role_id = sa.Column(sa.Integer(), nullable=False)
    # Fix: the original built these ForeignKeyConstraint objects as bare
    # class-body expressions, which SQLAlchemy silently discards. They must
    # be declared through __table_args__ to actually reach the table DDL.
    __table_args__ = (
        sa.ForeignKeyConstraint(['role_id'], [u'role.role_id']),
        sa.ForeignKeyConstraint(['project_id'], [u'project.project_id']),
    )
class Access(Base):
    """Access-code lookup table (table `access`)."""
    __tablename__ = 'access'
    access_id = sa.Column(sa.Integer(), primary_key = True)
    # single-character permission code; meaning defined by application code
    access_code = sa.Column(sa.String(1))
    comment = sa.Column(sa.String(30))
class Role(Base):
    """Role definition (table `role`), referenced by membership tables."""
    __tablename__ = 'role'
    role_id = sa.Column(sa.Integer, primary_key=True)
    role_mask = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
    role_code = sa.Column(sa.String(20))
    name = sa.Column(sa.String(20))
class Project(Base):
    """Registry project (table `project`); owner is a harbor_user row."""
    __tablename__ = 'project'
    project_id = sa.Column(sa.Integer, primary_key=True)
    owner_id = sa.Column(sa.ForeignKey(u'harbor_user.user_id'), nullable=False, index=True)
    name = sa.Column(sa.String(255), nullable=False, unique=True)
    creation_time = sa.Column(sa.TIMESTAMP)
    update_time = sa.Column(sa.TIMESTAMP)
    # soft-delete flag
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    owner = relationship(u'User')
class ProjectMetadata(Base):
    """Per-project key/value metadata (table `project_metadata`)."""
    __tablename__ = 'project_metadata'
    id = sa.Column(sa.Integer, primary_key=True)
    project_id = sa.Column(sa.ForeignKey(u'project.project_id'), nullable=False)
    name = sa.Column(sa.String(255), nullable=False)
    value = sa.Column(sa.String(255))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    # a metadata key is unique within a project
    __table_args__ = (sa.UniqueConstraint('project_id', 'name', name='unique_project_id_and_name'),)
class ReplicationPolicy(Base):
    """Image replication policy binding a project to a target (table `replication_policy`)."""
    __tablename__ = "replication_policy"
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(256))
    project_id = sa.Column(sa.Integer, nullable=False)
    target_id = sa.Column(sa.Integer, nullable=False)
    enabled = sa.Column(sa.Boolean, nullable=False, server_default='true')
    description = sa.Column(sa.Text)
    # cron expression for scheduled replication -- TODO confirm format
    cron_str = sa.Column(sa.String(256))
    filters = sa.Column(sa.String(1024))
    replicate_deletion = sa.Column(sa.Boolean, nullable=False, server_default='false')
    start_time = sa.Column(sa.TIMESTAMP)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
class ReplicationTarget(Base):
    """Remote registry endpoint used as a replication destination (table `replication_target`)."""
    __tablename__ = "replication_target"
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(64))
    url = sa.Column(sa.String(64))
    username = sa.Column(sa.String(255))
    password = sa.Column(sa.String(128))
    target_type = sa.Column(sa.SmallInteger, nullable=False, server_default=sa.text("'0'"))
    # when true, TLS certificate verification is skipped for this target
    insecure = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
class ReplicationJob(Base):
    """One replication job execution record (table `replication_job`)."""
    __tablename__ = "replication_job"
    id = sa.Column(sa.Integer, primary_key=True)
    status = sa.Column(sa.String(64), nullable=False)
    policy_id = sa.Column(sa.Integer, nullable=False)
    repository = sa.Column(sa.String(256), nullable=False)
    operation = sa.Column(sa.String(64), nullable=False)
    tags = sa.Column(sa.String(16384))
    job_uuid = sa.Column(sa.String(64))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    # non-unique index to look jobs up by their policy
    __table_args__ = (sa.Index('policy', 'policy_id'),)
class ReplicationImmediateTrigger(Base):
    """Event-driven trigger settings for a replication policy (table `replication_immediate_trigger`)."""
    __tablename__ = 'replication_immediate_trigger'
    id = sa.Column(sa.Integer, primary_key=True)
    policy_id = sa.Column(sa.Integer, nullable=False)
    namespace = sa.Column(sa.String(256), nullable=False)
    on_push = sa.Column(sa.Boolean, nullable=False, server_default='false')
    on_deletion = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
class Repository(Base):
    """Image repository within a project (table `repository`)."""
    __tablename__ = "repository"
    repository_id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(255), nullable=False, unique=True)
    project_id = sa.Column(sa.Integer, nullable=False)
    owner_id = sa.Column(sa.Integer, nullable=False)
    description = sa.Column(sa.Text)
    pull_count = sa.Column(sa.Integer,server_default=sa.text("'0'"), nullable=False)
    star_count = sa.Column(sa.Integer,server_default=sa.text("'0'"), nullable=False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
class AccessLog(Base):
    """Audit log of registry operations (table `access_log`)."""
    __tablename__ = "access_log"
    log_id = sa.Column(sa.Integer, primary_key=True)
    username = sa.Column(sa.String(255), nullable=False)
    project_id = sa.Column(sa.Integer, nullable=False)
    repo_name = sa.Column(sa.String(256))
    repo_tag = sa.Column(sa.String(128))
    GUID = sa.Column(sa.String(64))
    operation = sa.Column(sa.String(20))
    op_time = sa.Column(sa.TIMESTAMP)
    # NOTE(review): index is named 'project_id' but covers op_time only --
    # looks intentional for time-range scans, but verify against the mysql schema
    __table_args__ = (sa.Index('project_id', "op_time"),)
class ImageScanJob(Base):
    """Vulnerability scan job for one image tag (table `img_scan_job`)."""
    __tablename__ = "img_scan_job"
    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    status = sa.Column(sa.String(64), nullable=False)
    repository = sa.Column(sa.String(256), nullable=False)
    tag = sa.Column(sa.String(128), nullable=False)
    digest = sa.Column(sa.String(128))
    job_uuid = sa.Column(sa.String(64))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
class ImageScanOverview(Base):
    """Aggregated scan result per image digest (table `img_scan_overview`)."""
    __tablename__ = "img_scan_overview"
    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    image_digest = sa.Column(sa.String(128), nullable=False)
    scan_job_id = sa.Column(sa.Integer, nullable=False)
    severity = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
    components_overview = sa.Column(sa.String(2048))
    details_key = sa.Column(sa.String(128))
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
class ClairVulnTimestamp(Base):
    """Last vulnerability-DB update time per Clair namespace (table `clair_vuln_timestamp`)."""
    __tablename__ = "clair_vuln_timestamp"
    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    namespace = sa.Column(sa.String(128), nullable=False, unique=True)
    last_update = sa.Column(sa.TIMESTAMP)
class HarborLabel(Base):
    """User- or system-defined label (table `harbor_label`)."""
    __tablename__ = "harbor_label"
    id = sa.Column(sa.Integer, nullable=False, primary_key=True)
    name = sa.Column(sa.String(128), nullable=False)
    description = sa.Column(sa.Text)
    color = sa.Column(sa.String(16))
    # one-char level/scope discriminators; semantics defined by application code
    level = sa.Column(sa.String(1), nullable=False)
    scope = sa.Column(sa.String(1), nullable=False)
    project_id = sa.Column(sa.Integer, nullable=False)
    # soft-delete flag (added by the 1.6.0 migration in this commit)
    deleted = sa.Column(sa.Boolean, nullable=False, server_default='false')
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    __table_args__ = (sa.UniqueConstraint('name', 'scope', 'project_id', name='unique_label'),)
class HarborResourceLabel(Base):
    """Attachment of a label to a resource (table `harbor_resource_label`)."""
    __tablename__ = 'harbor_resource_label'
    id = sa.Column(sa.Integer, primary_key=True)
    label_id = sa.Column(sa.Integer, nullable=False)
    resource_id = sa.Column(sa.Integer)
    resource_name = sa.Column(sa.String(256))
    # one-char resource-type discriminator; semantics defined by application code
    resource_type = sa.Column(sa.String(1), nullable=False)
    creation_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    update_time = sa.Column(sa.TIMESTAMP, server_default=sa.text("'now'::timestamp"))
    __table_args__ = (sa.UniqueConstraint('label_id', 'resource_id', 'resource_name', 'resource_type', name='unique_label_resource'),)
class SchemaMigrations(Base):
    """Version-tracking table (table `schema_migrations`); seeded by the
    1.6.0 migration. Presumably the format used by golang-migrate
    (version + dirty flag) -- TODO confirm."""
    __tablename__ = 'schema_migrations'
    version = sa.Column(sa.BigInteger, primary_key=True)
    dirty = sa.Column(sa.Boolean, nullable=False)

View File

@ -0,0 +1 @@
Generic single-database configuration.

View File

@ -0,0 +1,70 @@
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# target_metadata stays None: revisions are hand-written, autogenerate unused.
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a URL rather than an Engine, so no
    DBAPI needs to be available; calls to context.execute() emit the given
    string to the script output.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Builds an Engine from the [alembic] config section and runs the
    migrations over a live database connection.
    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    with engine.connect() as conn:
        context.configure(connection=conn, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
# Pick the migration mode based on how alembic was invoked.
_run = run_migrations_offline if context.is_offline_mode() else run_migrations_online
_run()

View File

@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@ -0,0 +1,39 @@
# Copyright (c) 2008-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Empty version
Revision ID: 1.5.0
Revises:
Create Date: 2018-6-26
"""
# revision identifiers, used by Alembic.
revision = '1.5.0'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """
    update schema&data
    """
    # Intentionally empty: 1.5.0 is the baseline revision for the postgres
    # chain; the schema is created by the init script, not by alembic.
    pass
def downgrade():
    """
    Downgrade has been disabled.
    """
    # Deliberate no-op: Harbor does not support schema downgrades.
    pass

View File

@ -0,0 +1,54 @@
# Copyright (c) 2008-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""1.5.0 to 1.6.0
Revision ID: 1.6.0
Revises:
Create Date: 2018-6-26
"""
# revision identifiers, used by Alembic.
revision = '1.6.0'
down_revision = '1.5.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
# Unbound session factory; bound to the migration connection inside upgrade().
Session = sessionmaker()
def upgrade():
    """
    update schema&data
    """
    # Reuse alembic's own connection so DDL and the seed row share one scope.
    bind = op.get_bind()
    session = Session(bind=bind)
    ## Add column deleted to harbor_label
    op.add_column('harbor_label', sa.Column('deleted', sa.Boolean, nullable=False, server_default='false'))
    ## Add schema_migration then insert data
    # NOTE(review): seeds version=1/dirty=False so the new version-tracking
    # mechanism sees the DB as migrated -- confirm expected initial version.
    SchemaMigrations.__table__.create(bind)
    session.add(SchemaMigrations(version=1, dirty=False))
    session.commit()
def downgrade():
    """
    Downgrade has been disabled.
    """
    # Deliberate no-op: Harbor does not support schema downgrades.
    pass

View File

@ -198,13 +198,17 @@ function up_harbor {
psql -U $PGSQL_USR -f /harbor-migration/db/schema/notaryserver_init.pgsql psql -U $PGSQL_USR -f /harbor-migration/db/schema/notaryserver_init.pgsql
psql -U $PGSQL_USR -f /harbor-migration/db/schema/notarysigner_init.pgsql psql -U $PGSQL_USR -f /harbor-migration/db/schema/notarysigner_init.pgsql
## it needs to call the alembic_up to target, disable it as it's now unsupported.
#alembic_up $target_version
stop_pgsql $PGSQL_USR
stop_mysql $DB_USR $DB_PWD stop_mysql $DB_USR $DB_PWD
## it needs to call the alembic_up to target, disable it as it's now unsupported.
alembic_up pgsql $target_version
stop_pgsql $PGSQL_USR
rm -rf /var/lib/mysql/* rm -rf /var/lib/mysql/*
cp -rf $PGDATA/* /var/lib/mysql cp -rf $PGDATA/* /var/lib/mysql
## Chmod 700 to DB data directory
chmod 700 /var/lib/mysql
return 0 return 0
fi fi
fi fi

View File

@ -20,16 +20,21 @@ function alembic_up {
local db_type="$1" local db_type="$1"
local target_version="$2" local target_version="$2"
export PYTHONPATH=$PYTHONPATH:/harbor-migration/db/alembic
if [ $db_type = "mysql" ]; then if [ $db_type = "mysql" ]; then
source /harbor-migration/db/alembic/alembic.tpl > /harbor-migration/db/alembic/alembic.ini export PYTHONPATH=/harbor-migration/db/alembic/mysql
source /harbor-migration/db/alembic/mysql/alembic.tpl > /harbor-migration/db/alembic/mysql/alembic.ini
echo "Performing upgrade $target_version..." echo "Performing upgrade $target_version..."
alembic -c /harbor-migration/db/alembic/alembic.ini current alembic -c /harbor-migration/db/alembic/mysql/alembic.ini current
alembic -c /harbor-migration/db/alembic/alembic.ini upgrade $target_version alembic -c /harbor-migration/db/alembic/mysql/alembic.ini upgrade $target_version
alembic -c /harbor-migration/db/alembic/alembic.ini current alembic -c /harbor-migration/db/alembic/mysql/alembic.ini current
elif [ $db_type = "pgsql" ]; then elif [ $db_type = "pgsql" ]; then
export PYTHONPATH=/harbor-migration/db/alembic/postgres
echo "TODO: add support for pgsql." echo "TODO: add support for pgsql."
source /harbor-migration/db/alembic/postgres/alembic.tpl > /harbor-migration/db/alembic/postgres/alembic.ini
echo "Performing upgrade $target_version..."
alembic -c /harbor-migration/db/alembic/postgres/alembic.ini current
alembic -c /harbor-migration/db/alembic/postgres/alembic.ini upgrade $target_version
alembic -c /harbor-migration/db/alembic/postgres/alembic.ini current
else else
echo "Unsupported DB type." echo "Unsupported DB type."
exit 1 exit 1

View File

@ -76,7 +76,7 @@ function get_version_mysql {
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \ if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='alembic_version';") -eq 0 ]]; then where table_schema='registry' and table_name='alembic_version';") -eq 0 ]]; then
echo "table alembic_version does not exist. Trying to initial alembic_version." echo "table alembic_version does not exist. Trying to initial alembic_version."
mysql $DBCNF < ./alembic.sql mysql $DBCNF < /harbor-migration/db/alembic/mysql/alembic.sql
#compatible with version 0.1.0 and 0.1.1 #compatible with version 0.1.0 and 0.1.1
if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \ if [[ $(mysql $DBCNF -N -s -e "select count(*) from information_schema.tables \
where table_schema='registry' and table_name='properties'") -eq 0 ]]; then where table_schema='registry' and table_name='properties'") -eq 0 ]]; then