Merge branch 'master' of https://github.com/steven-zou/harbor into fork_master

This commit is contained in:
Steven Zou 2017-10-27 18:19:39 +08:00
commit 410908d863
62 changed files with 916 additions and 247 deletions

21
.gitmessage Normal file
View File

@ -0,0 +1,21 @@
SUBJECT HERE
#
# Please provide your commit's subject above this line.
#
# Guideline (http://chris.beams.io/posts/git-commit/)
#
# 1. Separate subject from body with a blank line
# 2. Limit the subject line to 50 characters
# 3. Capitalize the first letter of the subject line
# 4. Do not end the subject line with a period
# 5. Use the imperative mood in the subject line
# 6. Wrap the body at 72 characters
# 7. Use the body to explain what and why vs. how
#
# Description of your commit should go below. Make sure to leave
# one empty line after your description.
#
#
BODY LINE1
BODY LINE2

View File

@ -96,7 +96,6 @@ script:
- go test -race -i ./src/ui ./src/adminserver ./src/jobservice
- sudo -E env "PATH=$PATH" ./tests/coverage4gotest.sh
- goveralls -coverprofile=profile.cov -service=travis-ci
- docker-compose -f make/docker-compose.test.yml down
- sudo rm -rf /data/config/*
- ls /data/cert

View File

@ -86,7 +86,7 @@ NGINXVERSION=1.11.13
PHOTONVERSION=1.0
NOTARYVERSION=server-0.5.0
NOTARYSIGNERVERSION=signer-0.5.0
MARIADBVERSION=mariadb-10.1.10
MARIADBVERSION=10.2.8
HTTPPROXY=
REBUILDCLARITYFLAG=false
NEWCLARITYVERSION=
@ -241,7 +241,7 @@ DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
ifeq ($(NOTARYFLAG), true)
DOCKERSAVE_PARA+= vmware/notary-photon:$(NOTARYVERSION) vmware/notary-photon:$(NOTARYSIGNERVERSION) \
vmware/harbor-notary-db:$(MARIADBVERSION)
vmware/mariadb-photon:$(MARIADBVERSION)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
@ -371,7 +371,7 @@ package_offline: compile build modify_sourcefiles modify_composefile
echo "pulling notary and harbor-notary-db..."; \
$(DOCKERPULL) vmware/notary-photon:$(NOTARYVERSION); \
$(DOCKERPULL) vmware/notary-photon:$(NOTARYSIGNERVERSION); \
$(DOCKERPULL) vmware/harbor-notary-db:$(MARIADBVERSION); \
$(DOCKERPULL) vmware/mariadb-photon:$(MARIADBVERSION); \
fi
@if [ "$(CLAIRFLAG)" = "true" ] ; then \
echo "pulling claiy and postgres..."; \

View File

@ -34,11 +34,11 @@ Refer to **[User Guide](docs/user_guide.md)** for more details on how to use Har
### Community
**Slack:** Join Harbor's community for discussion and ask questions: [VMware {code}](https://code.vmware.com/join/), Channel: #harbor.
**Email:** harbor@ vmware.com .
More info on [partners and users](partners.md).
**Email:** harbor@vmware.com .
More info on [partners and users](partners.md).
### Contribution
We welcome contributions from the community. If you wish to contribute code and you have not signed our contributor license agreement (CLA), our bot will update the issue when you open a pull request. For any questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). Contact us for any questions: harbor @vmware.com .
We welcome contributions from the community. If you wish to contribute code and you have not signed our contributor license agreement (CLA), our bot will update the issue when you open a pull request. For any questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). Contact us for any questions: harbor@vmware.com .
### Demos
* ![play](docs/img/video.png) **Content Trust** ( [youtube](https://www.youtube.com/watch?v=pPklSTJZY2E) , [Tencent Video](https://v.qq.com/x/page/n0553fzzrnf.html) )
@ -56,7 +56,7 @@ This project uses open source components which have additional licensing terms.
* MySQL 5.6: [docker image](https://hub.docker.com/_/mysql/), [license](https://github.com/docker-library/mysql/blob/master/LICENSE)
### Commercial Support
If you need commercial support of Harbor, please contact us for more information: harbor@ vmware.com .
If you need commercial support of Harbor, please contact us for more information: harbor@vmware.com .

View File

@ -4,7 +4,7 @@ import json
import logging
import requests
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
class HarborClient(object):
def __init__(self, host, user, password, protocol="http"):
@ -23,15 +23,15 @@ class HarborClient(object):
if login_data.status_code == 200:
session_id = login_data.cookies.get('beegosessionID')
self.session_id = session_id
logging.debug("Successfully login, session id: {}".format(
logger.debug("Successfully login, session id: {}".format(
session_id))
else:
logging.error("Fail to login, please try again")
logger.error("Fail to login, please try again")
def logout(self):
requests.get('%s://%s/log_out' % (self.protocol, self.host),
cookies={'beegosessionID': self.session_id}, verify=False)
logging.debug("Successfully logout")
logger.debug("Successfully logout")
# GET /search
def search(self, query_string):
@ -42,9 +42,9 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug("Successfully get search result: {}".format(result))
logger.debug("Successfully get search result: {}".format(result))
else:
logging.error("Fail to get search result")
logger.error("Fail to get search result")
return result
# GET /projects
@ -55,10 +55,10 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug("Successfully get projects result: {}".format(
logger.debug("Successfully get projects result: {}".format(
result))
else:
logging.error("Fail to get projects result")
logger.error("Fail to get projects result")
return result
# HEAD /projects
@ -70,14 +70,14 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = True
logging.debug(
logger.debug(
"Successfully check project exist, result: {}".format(result))
elif response.status_code == 404:
result = False
logging.debug(
logger.debug(
"Successfully check project exist, result: {}".format(result))
else:
logging.error("Fail to check project exist")
logger.error("Fail to check project exist")
return result
# POST /projects
@ -91,11 +91,11 @@ class HarborClient(object):
data=request_body, verify=False)
if response.status_code == 201:
result = True
logging.debug(
logger.debug(
"Successfully create project with project name: {}".format(
project_name))
else:
logging.error(
logger.error(
"Fail to create project with project name: {}, response code: {}".format(
project_name, response.status_code))
return result
@ -108,11 +108,11 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug(
logger.debug(
"Successfully create project with project id: {}".format(
project_id))
else:
logging.error(
logger.error(
"Fail to create project with project id: {}, response code: {}".format(
project_id, response.status_code))
return result
@ -128,11 +128,11 @@ class HarborClient(object):
data=request_body, verify=False)
if response.status_code == 200:
result = True
logging.debug(
logger.debug(
"Successfully add project member with project id: {}".format(
project_id))
else:
logging.error(
logger.error(
"Fail to add project member with project id: {}, response code: {}".format(
project_id, response.status_code))
return result
@ -146,10 +146,10 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = True
logging.debug("Successfully delete member with id: {}".format(
logger.debug("Successfully delete member with id: {}".format(
user_id))
else:
logging.error("Fail to delete member with id: {}, response code: {}"
logger.error("Fail to delete member with id: {}, response code: {}"
.format(user_id, response.status_code))
return result
@ -164,11 +164,11 @@ class HarborClient(object):
data=request_body, verify=False)
if response.status_code == 200:
result = True
logging.debug(
logger.debug(
"Success to set project id: {} with publicity: {}".format(
project_id, is_public))
else:
logging.error(
logger.error(
"Fail to set publicity to project id: {} with status code: {}".format(
project_id, response.status_code))
return result
@ -181,9 +181,9 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug("Successfully get statistics: {}".format(result))
logger.debug("Successfully get statistics: {}".format(result))
else:
logging.error("Fail to get statistics result with status code: {}"
logger.error("Fail to get statistics result with status code: {}"
.format(response.status_code))
return result
@ -196,9 +196,9 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug("Successfully get users result: {}".format(result))
logger.debug("Successfully get users result: {}".format(result))
else:
logging.error("Fail to get users result with status code: {}"
logger.error("Fail to get users result with status code: {}"
.format(response.status_code))
return result
@ -210,9 +210,9 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug("Successfully get users result: {}".format(result))
logger.debug("Successfully get users result: {}".format(result))
else:
logging.error("Fail to get users result with status code: {}"
logger.error("Fail to get users result with status code: {}"
.format(response.status_code))
return result
@ -230,10 +230,10 @@ class HarborClient(object):
data=request_body, verify=False)
if response.status_code == 201:
result = True
logging.debug("Successfully create user with username: {}".format(
logger.debug("Successfully create user with username: {}".format(
username))
else:
logging.error(
logger.error(
"Fail to create user with username: {}, response code: {}".format(
username, response.status_code))
return result
@ -252,11 +252,11 @@ class HarborClient(object):
data=request_body, verify=False)
if response.status_code == 200:
result = True
logging.debug(
logger.debug(
"Successfully update user profile with user id: {}".format(
user_id))
else:
logging.error(
logger.error(
"Fail to update user profile with user id: {}, response code: {}".format(
user_id, response.status_code))
return result
@ -270,10 +270,10 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = True
logging.debug("Successfully delete user with id: {}".format(
logger.debug("Successfully delete user with id: {}".format(
user_id))
else:
logging.error("Fail to delete user with id: {}, response code: {}"
logger.error("Fail to delete user with id: {}, response code: {}"
.format(user_id, response.status_code))
return result
@ -289,10 +289,10 @@ class HarborClient(object):
data=request_body, verify=False)
if response.status_code == 200:
result = True
logging.debug(
logger.debug(
"Successfully change password for user id: {}".format(user_id))
else:
logging.error("Fail to change password for user id: {}".format(
logger.error("Fail to change password for user id: {}".format(
user_id))
return result
@ -308,11 +308,11 @@ class HarborClient(object):
data=request_body, verify=False)
if response.status_code == 200:
result = True
logging.debug(
logger.debug(
"Successfully promote user as admin with user id: {}".format(
user_id))
else:
logging.error(
logger.error(
"Fail to promote user as admin with user id: {}, response code: {}".format(
user_id, response.status_code))
return result
@ -327,11 +327,11 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug(
logger.debug(
"Successfully get repositories with id: {}, result: {}".format(
project_id, result))
else:
logging.error("Fail to get repositories result with id: {}, response code: {}".format(
logger.error("Fail to get repositories result with id: {}, response code: {}".format(
project_id, response.status_code))
return result
@ -344,10 +344,10 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = True
logging.debug("Successfully delete a tag of repository: {}".format(
logger.debug("Successfully delete a tag of repository: {}".format(
repo_name))
else:
logging.error("Fail to delete repository with name: {}, response code: {}".format(
logger.error("Fail to delete repository with name: {}, response code: {}".format(
repo_name, response.status_code))
return result
@ -360,10 +360,10 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = True
logging.debug("Successfully delete repository: {}".format(
logger.debug("Successfully delete repository: {}".format(
repo_name))
else:
logging.error("Fail to delete repository with name: {}, response code: {}".format(
logger.error("Fail to delete repository with name: {}, response code: {}".format(
repo_name, response.status_code))
return result
@ -376,11 +376,11 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug(
logger.debug(
"Successfully get tag with repo name: {}, result: {}".format(
repo_name, result))
else:
logging.error("Fail to get tags with repo name: {}, response code: {}".format(
logger.error("Fail to get tags with repo name: {}, response code: {}".format(
repo_name, response.status_code))
return result
@ -393,11 +393,11 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug(
logger.debug(
"Successfully get manifests with repo name: {}, tag: {}, result: {}".format(
repo_name, tag, result))
else:
logging.error(
logger.error(
"Fail to get manifests with repo name: {}, tag: {}".format(
repo_name, tag))
return result
@ -412,11 +412,11 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug(
logger.debug(
"Successfully get top accessed repositories, result: {}".format(
result))
else:
logging.error("Fail to get top accessed repositories")
logger.error("Fail to get top accessed repositories")
return result
# GET /logs
@ -427,9 +427,9 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug("Successfully get logs")
logger.debug("Successfully get logs")
else:
logging.error("Fail to get logs and response code: {}".format(
logger.error("Fail to get logs and response code: {}".format(
response.status_code))
return result
@ -441,10 +441,10 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug(
logger.debug(
"Successfully get systeminfo, result: {}".format(result))
else:
logging.error("Fail to get systeminfo, response code: {}".format(response.status_code))
logger.error("Fail to get systeminfo, response code: {}".format(response.status_code))
return result
# Get /configurations
@ -455,8 +455,8 @@ class HarborClient(object):
cookies={'beegosessionID': self.session_id}, verify=False)
if response.status_code == 200:
result = response.json()
logging.debug(
logger.debug(
"Successfully get configurations, result: {}".format(result))
else:
logging.error("Fail to get configurations, response code: {}".format(response.status_code))
logger.error("Fail to get configurations, response code: {}".format(response.status_code))
return result

View File

@ -2339,6 +2339,9 @@ definitions:
type: integer
format: int
description: Reserved field.
insecure:
type: boolean
description: Whether or not the certificate will be verified when Harbor tries to access the server.
creation_time:
type: string
description: The create time of the policy.
@ -2360,6 +2363,9 @@ definitions:
password:
type: string
description: The target server password.
insecure:
type: boolean
description: Whether or not the certificate will be verified when Harbor tries to access the server.
PingTarget:
type: object
properties:
@ -2372,6 +2378,9 @@ definitions:
password:
type: string
description: The target server password.
insecure:
type: boolean
description: Whether or not the certificate will be verified when Harbor tries to access the server.
PutTarget:
type: object
properties:
@ -2387,6 +2396,9 @@ definitions:
password:
type: string
description: The target server password.
insecure:
type: boolean
description: Whether or not the certificate will be verified when Harbor tries to access the server.
HasAdminRole:
type: object
properties:

View File

@ -1,8 +1,6 @@
FROM mysql:5.6.35
FROM vmware/mariadb-photon:10.2.8
WORKDIR /tmp
COPY registry.sql /docker-entrypoint-initdb.d/
COPY registry-flag.sh /docker-entrypoint-initdb.d/
COPY upgrade.sh /docker-entrypoint-updatedb.d/
ADD registry.sql r.sql
ADD docker-entrypoint.sh /entrypoint.sh
RUN chmod u+x /entrypoint.sh

View File

@ -1,44 +0,0 @@
#!/bin/bash
set -e
if [ ! -d '/var/lib/mysql/mysql' -a "${1%_safe}" = 'mysqld' ]; then
if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set'
echo >&2 ' Did you forget to add -e MYSQL_ROOT_PASSWORD=... ? v2'
exit 1
fi
mysql_install_db --user=mysql --datadir=/var/lib/mysql
# These statements _must_ be on individual lines, and _must_ end with
# semicolons (no line breaks or comments are permitted).
# TODO proper SQL escaping on ALL the things D:
printf -v MYSQL_ROOT_PASSWORD "%q" ${MYSQL_ROOT_PASSWORD}
TEMP_FILE='/tmp/mysql-first-time.sql'
cat > "$TEMP_FILE" <<-EOSQL
DELETE FROM mysql.user ;
CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ;
DROP DATABASE IF EXISTS test ;
EOSQL
if [ "$MYSQL_DATABASE" ]; then
echo "CREATE DATABASE IF NOT EXISTS $MYSQL_DATABASE ;" >> "$TEMP_FILE"
fi
if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" >> "$TEMP_FILE"
if [ "$MYSQL_DATABASE" ]; then
echo "GRANT ALL ON $MYSQL_DATABASE.* TO '$MYSQL_USER'@'%' ;" >> "$TEMP_FILE"
fi
fi
echo 'FLUSH PRIVILEGES ;' >> "$TEMP_FILE"
cat /tmp/r.sql >> "$TEMP_FILE"
set -- "$@" --init-file="$TEMP_FILE"
fi
chown -R mysql:mysql /var/lib/mysql
exec "$@"

View File

@ -0,0 +1,3 @@
#!/bin/sh
touch /var/lib/mysql/created_in_mariadb.flag
echo "dumped flag for MariaDB"

View File

@ -163,6 +163,7 @@ create table replication_target (
1 means it's a regular registry
*/
target_type tinyint(1) NOT NULL DEFAULT 0,
insecure tinyint(1) NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
PRIMARY KEY (id)

View File

@ -158,6 +158,7 @@ create table replication_target (
1 means it's a regular registry
*/
target_type tinyint(1) NOT NULL DEFAULT 0,
insecure tinyint(1) NOT NULL DEFAULT 0,
creation_time timestamp default CURRENT_TIMESTAMP,
update_time timestamp default CURRENT_TIMESTAMP
);

28
make/common/db/upgrade.sh Executable file
View File

@ -0,0 +1,28 @@
#!/bin/bash
set +e
if [ ! -f /var/lib/mysql/created_in_mariadb.flag ]; then
echo "Maria DB flag not found, the DB was created in mysql image, running upgrade..."
mysqld >/dev/null 2>&1 &
pid="$!"
for i in {30..0}; do
mysqladmin -uroot -p$MYSQL_ROOT_PASSWORD processlist >/dev/null 2>&1
if [ $? = 0 ]; then
break
fi
echo 'Waiting for MySQL start...'
sleep 1
done
if [ "$i" = 0 ]; then
echo >&2 'MySQL failed to start.'
exit 1
fi
set -e
mysql_upgrade -p$MYSQL_ROOT_PASSWORD
echo 'Finished upgrading'
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'Failed to stop MySQL for upgrading.'
exit 1
fi
else
echo "DB was created in Maria DB, skip upgrade."
fi

View File

@ -0,0 +1,25 @@
FROM vmware/photon:1.0
#The Docker Daemon has to be running with storage backend btrfs when building the image
RUN tdnf distro-sync -y || echo \
&& tdnf install -y sed shadow procps-ng gawk gzip sudo net-tools \
&& groupadd -r -g 999 mysql && useradd --no-log-init -r -g 999 -u 999 mysql \
&& tdnf install -y mariadb-server mariadb \
&& mkdir /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& rm -fr /var/lib/mysql \
&& mkdir -p /var/lib/mysql /var/run/mysqld \
&& chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \
&& chmod 777 /var/run/mysqld /docker-entrypoint-initdb.d /docker-entrypoint-updatedb.d \
&& tdnf clean all
COPY docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
COPY my.cnf /etc/
RUN ln -s usr/local/bin/docker-entrypoint.sh /
VOLUME /var/lib/mysql
EXPOSE 3306
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["mysqld"]

View File

@ -0,0 +1,203 @@
#!/bin/bash
set -eo pipefail
shopt -s nullglob
# if command starts with an option, prepend mysqld
if [ "${1:0:1}" = '-' ]; then
set -- mysqld "$@"
fi
# skip setup if they want an option that stops mysqld
wantHelp=
for arg; do
case "$arg" in
-'?'|--help|--print-defaults|-V|--version)
wantHelp=1
break
;;
esac
done
# usage: file_env VAR [DEFAULT]
# ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}
_check_config() {
toRun=( "$@" --verbose --help --log-bin-index="$(mktemp -u)" )
if ! errors="$("${toRun[@]}" 2>&1 >/dev/null)"; then
cat >&2 <<-EOM
ERROR: mysqld failed while attempting to check config
command was: "${toRun[*]}"
$errors
EOM
exit 1
fi
}
# Fetch value from server config
# We use mysqld --verbose --help instead of my_print_defaults because the
# latter only show values present in config files, and not server defaults
_get_config() {
local conf="$1"; shift
"$@" --verbose --help --log-bin-index="$(mktemp -u)" 2>/dev/null | awk '$1 == "'"$conf"'" { print $2; exit }'
}
# allow the container to be started with `--user`
if [ "$1" = 'mysqld' -a -z "$wantHelp" -a "$(id -u)" = '0' ]; then
_check_config "$@"
DATADIR="$(_get_config 'datadir' "$@")"
mkdir -p "$DATADIR"
chown -R mysql:mysql "$DATADIR"
if [ -d '/docker-entrypoint-initdb.d' ]; then
chmod -R +rx /docker-entrypoint-updatedb.d
fi
if [ -d '/docker-entrypoint-updatedb.d' ]; then
chmod -R +rx /docker-entrypoint-updatedb.d
fi
exec sudo -u mysql -E "$BASH_SOURCE" "$@"
fi
if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then
# still need to check config, container may have started with --user
_check_config "$@"
# Get config
DATADIR="$(_get_config 'datadir' "$@")"
if [ ! -d "$DATADIR/mysql" ]; then
file_env 'MYSQL_ROOT_PASSWORD'
if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
echo >&2 'error: database is uninitialized and password option is not specified '
echo >&2 ' You need to specify one of MYSQL_ROOT_PASSWORD, MYSQL_ALLOW_EMPTY_PASSWORD'
exit 1
fi
mkdir -p "$DATADIR"
echo 'Initializing database'
cd /usr
mysql_install_db --datadir="$DATADIR" --rpm
cd -
echo 'Database initialized'
SOCKET="$(_get_config 'socket' "$@")"
"$@" --skip-networking --socket="${SOCKET}" &
pid="$!"
mysql=( mysql --protocol=socket -uroot -hlocalhost --socket="${SOCKET}" )
for i in {30..0}; do
if echo 'SELECT 1' | "${mysql[@]}" &> /dev/null; then
break
fi
echo 'MySQL init process in progress...'
sleep 1
done
if [ "$i" = 0 ]; then
echo >&2 'MySQL init process failed.'
exit 1
fi
if [ -z "$MYSQL_INITDB_SKIP_TZINFO" ]; then
# sed is for https://bugs.mysql.com/bug.php?id=20545
mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql
fi
rootCreate=
# default root to listen for connections from anywhere
file_env 'MYSQL_ROOT_HOST' '%'
if [ ! -z "$MYSQL_ROOT_HOST" -a "$MYSQL_ROOT_HOST" != 'localhost' ]; then
# no, we don't care if read finds a terminating character in this heredoc
# https://unix.stackexchange.com/questions/265149/why-is-set-o-errexit-breaking-this-read-heredoc-expression/265151#265151
read -r -d '' rootCreate <<-EOSQL || true
CREATE USER 'root'@'${MYSQL_ROOT_HOST}' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
GRANT ALL ON *.* TO 'root'@'${MYSQL_ROOT_HOST}' WITH GRANT OPTION ;
EOSQL
fi
"${mysql[@]}" <<-EOSQL
-- What's done in this file shouldn't be replicated
-- or products like mysql-fabric won't work
SET @@SESSION.SQL_LOG_BIN=0;
DELETE FROM mysql.user WHERE user NOT IN ('mysql.sys', 'mysqlxsys', 'root') OR host NOT IN ('localhost') ;
SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}') ;
GRANT ALL ON *.* TO 'root'@'localhost' WITH GRANT OPTION ;
${rootCreate}
DROP DATABASE IF EXISTS test ;
FLUSH PRIVILEGES ;
EOSQL
if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then
mysql+=( -p"${MYSQL_ROOT_PASSWORD}" )
fi
file_env 'MYSQL_DATABASE'
if [ "$MYSQL_DATABASE" ]; then
echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" | "${mysql[@]}"
mysql+=( "$MYSQL_DATABASE" )
fi
file_env 'MYSQL_USER'
file_env 'MYSQL_PASSWORD'
if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" | "${mysql[@]}"
if [ "$MYSQL_DATABASE" ]; then
echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" | "${mysql[@]}"
fi
echo 'FLUSH PRIVILEGES ;' | "${mysql[@]}"
fi
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${mysql[@]}" < "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${mysql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'MySQL init process failed.'
exit 1
fi
echo
echo 'MySQL init process done. Ready for start up.'
echo
fi
for f in /docker-entrypoint-updatedb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
### Not supported for now... until needed
# *.sql) echo "$0: running $f"; "${mysql[@]}" < "$f"; echo ;;
# *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${mysql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
fi
exec "$@"

191
make/common/mariadb/my.cnf Normal file
View File

@ -0,0 +1,191 @@
# MariaDB database server configuration file.
#
# You can copy this file to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
# Here is entries for some specific programs
# The following values assume you have at least 32M ram
# This was formally known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
skip-host-cache
#skip-name-resolve
#
# * Basic Settings
#
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc_messages_dir = /usr/share/mysql
lc_messages = en_US
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
#bind-address = 127.0.0.1
#
# * Fine Tuning
#
max_connections = 100
connect_timeout = 5
wait_timeout = 600
max_allowed_packet = 16M
thread_cache_size = 128
sort_buffer_size = 4M
bulk_insert_buffer_size = 16M
tmp_table_size = 32M
max_heap_table_size = 32M
#
# * MyISAM
#
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched. On error, make copy and try a repair.
myisam_recover_options = BACKUP
key_buffer_size = 128M
#open-files-limit = 2000
table_open_cache = 400
myisam_sort_buffer_size = 512M
concurrent_insert = 2
read_buffer_size = 2M
read_rnd_buffer_size = 1M
#
# * Query Cache Configuration
#
# Cache only tiny result sets, so we can fit more in the query cache.
query_cache_limit = 128K
query_cache_size = 64M
# for more write intensive setups, set to DEMAND or OFF
#query_cache_type = DEMAND
#
# * Logging and Replication
#
# Both location gets rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1
#
# Error logging goes to syslog due to /etc/mysql/conf.d/mysqld_safe_syslog.cnf.
#
# we do want to know about network errors and such
#log_warnings = 2
#
# Enable the slow query log to see queries with especially long duration
#slow_query_log[={0|1}]
slow_query_log_file = /var/log/mysql/mariadb-slow.log
long_query_time = 10
#log_slow_rate_limit = 1000
#log_slow_verbosity = query_plan
#log-queries-not-using-indexes
#log_slow_admin_statements
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
#server-id = 1
#report_host = master1
#auto_increment_increment = 2
#auto_increment_offset = 1
#log_bin = /var/log/mysql/mariadb-bin
#log_bin_index = /var/log/mysql/mariadb-bin.index
# not fab for performance, but safer
#sync_binlog = 1
expire_logs_days = 10
max_binlog_size = 100M
# slaves
#relay_log = /var/log/mysql/relay-bin
#relay_log_index = /var/log/mysql/relay-bin.index
#relay_log_info_file = /var/log/mysql/relay-bin.info
#log_slave_updates
#read_only
#
# If applications support it, this stricter sql_mode prevents some
# mistakes like inserting invalid dates etc.
#sql_mode = NO_ENGINE_SUBSTITUTION,TRADITIONAL
#
# * InnoDB
#
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
default_storage_engine = InnoDB
# you can't just change log file size, requires special procedure
#innodb_log_file_size = 50M
innodb_buffer_pool_size = 256M
innodb_log_buffer_size = 8M
innodb_file_per_table = 1
innodb_open_files = 400
innodb_io_capacity = 400
innodb_flush_method = O_DIRECT
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem
#
# * Galera-related settings
#
[galera]
# Mandatory settings
#wsrep_on=ON
#wsrep_provider=
#wsrep_cluster_address=
#binlog_format=row
#default_storage_engine=InnoDB
#innodb_autoinc_lock_mode=2
#
# Allow server to accept connections on all interfaces.
#
#bind-address=0.0.0.0
#
# Optional setting
#wsrep_slave_threads=1
#innodb_flush_log_at_trx_commit=0
[mysqldump]
quick
quote-names
max_allowed_packet = 16M
[mysql]
#no-auto-rehash # faster start of mysql but no tab completion
[isamchk]
key_buffer = 16M
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/my.cnf.d/

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y \
RUN tdnf distro-sync -y || echo \
&& tdnf install -y nginx \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \

View File

@ -3,7 +3,7 @@ FROM vmware/photon:1.0
ENV PGDATA /var/lib/postgresql/data
RUN touch /etc/localtime.bak \
&& tdnf distro-sync -y \
&& tdnf distro-sync -y || echo \
&& tdnf install -y sed shadow gzip postgresql\
&& groupadd -r postgres --gid=999 \
&& useradd -r -g postgres --uid=999 postgres \

View File

@ -1,9 +1,11 @@
FROM library/photon:1.0
FROM vmware/photon:1.0
#base image for rsyslog base on photon
RUN tdnf install -y cronie rsyslog shadow tar gzip \
RUN tdnf distro-sync -y || echo \
&& tdnf install -y cronie rsyslog shadow tar gzip \
&& mkdir /etc/rsyslog.d/ \
&& mkdir /var/spool/rsyslog \
&& groupadd syslog \
&& useradd -g syslog syslog
&& useradd -g syslog syslog \
&& tdnf clean all

View File

@ -27,7 +27,6 @@ EMAIL_FROM=$email_from
EMAIL_IDENTITY=$email_identity
HARBOR_ADMIN_PASSWORD=$harbor_admin_password
PROJECT_CREATION_RESTRICTION=$project_creation_restriction
VERIFY_REMOTE_CERT=$verify_remote_cert
MAX_JOB_WORKERS=$max_job_workers
UI_SECRET=$ui_secret
JOBSERVICE_SECRET=$jobservice_secret

View File

@ -49,7 +49,7 @@ services:
syslog-address: "tcp://127.0.0.1:1514"
tag: "notary-signer"
notary-db:
image: vmware/harbor-notary-db:mariadb-10.1.10
image: vmware/mariadb-photon:10.2.8
container_name: notary-db
restart: always
networks:

View File

@ -1,7 +1,7 @@
FROM vmware/photon:1.0-20170928
FROM vmware/photon:1.0
RUN tdnf erase vim -y \
&& tdnf distro-sync -y \
&& tdnf distro-sync -y || echo \
&& tdnf clean all \
&& mkdir /harbor/
COPY ./make/dev/adminserver/harbor_adminserver /harbor/

View File

@ -1,6 +1,8 @@
FROM library/photon:1.0
FROM vmware/photon:1.0
RUN mkdir /harbor/
RUN mkdir /harbor/ \
&& tdnf distro-sync -y || echo \
&& tdnf clean all
COPY ./make/dev/jobservice/harbor_jobservice /harbor/
RUN chmod u+x /harbor/harbor_jobservice

View File

@ -149,7 +149,6 @@ if protocol == "https":
customize_crt = rcp.get("configuration", "customize_crt")
max_job_workers = rcp.get("configuration", "max_job_workers")
token_expiration = rcp.get("configuration", "token_expiration")
verify_remote_cert = rcp.get("configuration", "verify_remote_cert")
proj_cre_restriction = rcp.get("configuration", "project_creation_restriction")
secretkey_path = rcp.get("configuration", "secretkey_path")
if rcp.has_option("configuration", "admiral_url"):
@ -239,7 +238,6 @@ render(os.path.join(templates_dir, "adminserver", "env"),
email_identity=email_identity,
harbor_admin_password=harbor_admin_password,
project_creation_restriction=proj_cre_restriction,
verify_remote_cert=verify_remote_cert,
max_job_workers=max_job_workers,
ui_secret=ui_secret,
jobservice_secret=jobservice_secret,

View File

@ -111,10 +111,6 @@ var (
env: "MAX_JOB_WORKERS",
parse: parseStringToInt,
},
common.VerifyRemoteCert: &parser{
env: "VERIFY_REMOTE_CERT",
parse: parseStringToBool,
},
common.ProjectCreationRestriction: "PROJECT_CREATION_RESTRICTION",
common.AdminInitialPassword: "HARBOR_ADMIN_PASSWORD",
common.AdmiralEndpoint: "ADMIRAL_URL",

View File

@ -57,7 +57,6 @@ const (
EmailIdentity = "email_identity"
EmailInsecure = "email_insecure"
ProjectCreationRestriction = "project_creation_restriction"
VerifyRemoteCert = "verify_remote_cert"
MaxJobWorkers = "max_job_workers"
TokenExpiration = "token_expiration"
CfgExpiration = "cfg_expiration"

View File

@ -76,7 +76,7 @@ func DeleteRepTarget(id int64) error {
func UpdateRepTarget(target models.RepTarget) error {
o := GetOrmer()
target.UpdateTime = time.Now()
_, err := o.Update(&target, "URL", "Name", "Username", "Password", "UpdateTime")
_, err := o.Update(&target, "URL", "Name", "Username", "Password", "Insecure", "UpdateTime")
return err
}

View File

@ -105,6 +105,7 @@ type RepTarget struct {
Username string `orm:"column(username)" json:"username"`
Password string `orm:"column(password)" json:"password"`
Type int `orm:"column(target_type)" json:"type"`
Insecure bool `orm:"column(insecure)" json:"insecure"`
CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
}

View File

@ -0,0 +1,73 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clair
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/clair/test"
)
var (
notificationID = "ec45ec87-bfc8-4129-a1c3-d2b82622175a"
layerName = "03adedf41d4e0ea1b2458546a5b4717bf5f24b23489b25589e20c692aaf84d19"
client *Client
)
// TestMain starts the mock Clair server, wires the package-level client
// to it, runs the test suite, and exits with the suite's status code.
func TestMain(m *testing.M) {
	mockClairServer := test.NewMockServer()
	client = NewClient(mockClairServer.URL, nil)
	rc := m.Run()
	// Close explicitly rather than via defer: os.Exit skips deferred
	// calls, so the previous defer + conditional os.Exit(rc) leaked the
	// server whenever the suite failed. Always exit with rc so success
	// and failure are both reported explicitly.
	mockClairServer.Close()
	os.Exit(rc)
}
func TestListNamespaces(t *testing.T) {
assert := assert.New(t)
ns, err := client.ListNamespaces()
assert.Nil(err)
assert.Equal(25, len(ns))
}
// TestNotifications covers fetching a known notification, fetching a
// nonexistent one, and deleting the known one against the mock server.
func TestNotifications(t *testing.T) {
	a := assert.New(t)
	notification, err := client.GetNotification(notificationID)
	a.Nil(err)
	a.Equal(notificationID, notification.Name)
	if _, err = client.GetNotification("noexist"); true {
		a.NotNil(err)
	}
	a.Nil(client.DeleteNotification(notificationID))
}
// TestLaysers exercises layer scanning and scan-result retrieval against
// the mock server. (NOTE(review): the name looks like a typo for
// "TestLayers"; kept as-is to avoid renaming.)
func TestLaysers(t *testing.T) {
	a := assert.New(t)
	fake := models.ClairLayer{
		Name:       "fakelayer",
		ParentName: "parent",
		Path:       "http://registry:5000/layers/xxx",
	}
	a.Nil(client.ScanLayer(fake))
	result, err := client.GetResult(layerName)
	a.Nil(err)
	a.Equal(layerName, result.Layer.Name)
	_, err = client.GetResult("notexist")
	a.NotNil(err)
}

View File

@ -0,0 +1,62 @@
{
"Notification": {
"Name": "ec45ec87-bfc8-4129-a1c3-d2b82622175a",
"Created": "1456247389",
"Notified": "1456246708",
"Limit": 2,
"Page": "gAAAAABWzJaC2JCH6Apr_R1f2EkjGdibnrKOobTcYXBWl6t0Cw6Q04ENGIymB6XlZ3Zi0bYt2c-2cXe43fvsJ7ECZhZz4P8C8F9efr_SR0HPiejzQTuG0qAzeO8klogFfFjSz2peBvgP",
"NextPage": "gAAAAABWzJaCTyr6QXP2aYsCwEZfWIkU2GkNplSMlTOhLJfiR3LorBv8QYgEIgyOvZRmHQEzJKvkI6TP2PkRczBkcD17GE89btaaKMqEX14yHDgyfQvdasW1tj3-5bBRt0esKi9ym5En",
"New": {
"Vulnerability": {
"Name": "CVE-TEST",
"NamespaceName": "debian:8",
"Description": "New CVE",
"Severity": "Low",
"FixedIn": [
{
"Name": "grep",
"NamespaceName": "debian:8",
"Version": "2.25"
}
]
},
"OrderedLayersIntroducingVulnerability": [
{
"Index": 1,
"LayerName": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
},
{
"Index": 2,
"LayerName": "3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d"
}
],
"LayersIntroducingVulnerability": [
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6",
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d182371916b12d"
]
},
"Old": {
"Vulnerability": {
"Name": "CVE-TEST",
"NamespaceName": "debian:8",
"Description": "New CVE",
"Severity": "Low",
"FixedIn": []
},
"OrderedLayersIntroducingVulnerability": [
{
"Index": 1,
"LayerName": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
},
{
"Index": 2,
"LayerName": "3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d"
}
],
"LayersIntroducingVulnerability": [
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d",
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
]
}
}
}

View File

@ -0,0 +1 @@
{"Namespaces":[{"Name":"debian:7","VersionFormat":"dpkg"},{"Name":"debian:unstable","VersionFormat":"dpkg"},{"Name":"debian:9","VersionFormat":"dpkg"},{"Name":"debian:10","VersionFormat":"dpkg"},{"Name":"debian:8","VersionFormat":"dpkg"},{"Name":"alpine:v3.6","VersionFormat":"dpkg"},{"Name":"alpine:v3.5","VersionFormat":"dpkg"},{"Name":"alpine:v3.4","VersionFormat":"dpkg"},{"Name":"alpine:v3.3","VersionFormat":"dpkg"},{"Name":"oracle:6","VersionFormat":"rpm"},{"Name":"oracle:7","VersionFormat":"rpm"},{"Name":"oracle:5","VersionFormat":"rpm"},{"Name":"ubuntu:14.04","VersionFormat":"dpkg"},{"Name":"ubuntu:15.10","VersionFormat":"dpkg"},{"Name":"ubuntu:17.04","VersionFormat":"dpkg"},{"Name":"ubuntu:16.04","VersionFormat":"dpkg"},{"Name":"ubuntu:12.04","VersionFormat":"dpkg"},{"Name":"ubuntu:13.04","VersionFormat":"dpkg"},{"Name":"ubuntu:14.10","VersionFormat":"dpkg"},{"Name":"ubuntu:12.10","VersionFormat":"dpkg"},{"Name":"ubuntu:16.10","VersionFormat":"dpkg"},{"Name":"ubuntu:15.04","VersionFormat":"dpkg"},{"Name":"centos:6","VersionFormat":"rpm"},{"Name":"centos:7","VersionFormat":"rpm"},{"Name":"centos:5","VersionFormat":"rpm"}]}

View File

@ -0,0 +1,117 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"path"
"runtime"
"strings"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
)
// currPath returns the directory containing this source file, resolved
// from the runtime caller information; it panics if that information is
// unavailable.
func currPath() string {
	if _, file, _, ok := runtime.Caller(0); ok {
		return path.Dir(file)
	}
	panic("Failed to get current directory")
}
func serveFile(rw http.ResponseWriter, p string) {
data, err := ioutil.ReadFile(p)
if err != nil {
http.Error(rw, err.Error(), 500)
}
_, err2 := rw.Write(data)
if err2 != nil {
http.Error(rw, err2.Error(), 500)
}
}
// notificationHandler mocks Clair's /v1/notifications/ endpoint.
type notificationHandler struct {
	id string // the only notification ID the mock recognizes
}

// ServeHTTP answers DELETE with 200 unconditionally; GET serves the
// canned notification.json when the requested ID matches n.id and 404
// otherwise; every other method gets 405.
func (n *notificationHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case http.MethodDelete:
		rw.WriteHeader(200)
	case http.MethodGet:
		requested := strings.TrimPrefix(req.URL.Path, "/v1/notifications/")
		if !strings.HasPrefix(requested, n.id) {
			rw.WriteHeader(404)
			return
		}
		serveFile(rw, path.Join(currPath(), "notification.json"))
	default:
		rw.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// layerHandler mocks Clair's /v1/layers endpoints.
type layerHandler struct {
	name string // the only layer name the mock recognizes on GET
}

// ServeHTTP accepts POSTed layer envelopes (validated as JSON) with 201,
// serves the canned total-12.json for GET of the known layer name, 404
// for unknown names, and 405 for any other method.
func (l *layerHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	if req.Method == http.MethodPost {
		data, err := ioutil.ReadAll(req.Body)
		defer req.Body.Close()
		if err != nil {
			http.Error(rw, err.Error(), 500)
			// bug fix: previously continued into json.Unmarshal after
			// the read error had been reported.
			return
		}
		layer := &models.ClairLayerEnvelope{}
		if err := json.Unmarshal(data, layer); err != nil {
			http.Error(rw, err.Error(), http.StatusBadRequest)
			// bug fix: previously fell through and also wrote a 201
			// for malformed payloads.
			return
		}
		rw.WriteHeader(http.StatusCreated)
	} else if req.Method == http.MethodGet {
		name := strings.TrimPrefix(req.URL.Path, "/v1/layers/")
		if name == l.name {
			serveFile(rw, path.Join(currPath(), "total-12.json"))
		} else {
			http.Error(rw, fmt.Sprintf("Invalid layer name: %s", name), http.StatusNotFound)
		}
	} else {
		http.Error(rw, "", http.StatusMethodNotAllowed)
	}
}
// NewMockServer starts an httptest server that emulates the subset of
// the Clair v1 API exercised by the client tests: namespaces listing,
// notifications, and layer scan/result endpoints. Unmatched paths are
// logged and answered with 404.
func NewMockServer() *httptest.Server {
	const (
		notificationID = "ec45ec87-bfc8-4129-a1c3-d2b82622175a"
		knownLayer     = "03adedf41d4e0ea1b2458546a5b4717bf5f24b23489b25589e20c692aaf84d19"
	)
	router := http.NewServeMux()
	router.HandleFunc("/v1/namespaces", func(rw http.ResponseWriter, req *http.Request) {
		if req.Method != http.MethodGet {
			rw.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		serveFile(rw, path.Join(currPath(), "ns.json"))
	})
	router.Handle("/v1/notifications/", &notificationHandler{id: notificationID})
	// The handler is stateless, so one instance serves both patterns.
	layers := &layerHandler{name: knownLayer}
	router.Handle("/v1/layers", layers)
	router.Handle("/v1/layers/", layers)
	router.HandleFunc("/", func(rw http.ResponseWriter, req *http.Request) {
		log.Infof("method: %s, path: %s", req.Method, req.URL.Path)
		rw.WriteHeader(http.StatusNotFound)
	})
	return httptest.NewServer(router)
}

View File

@ -53,7 +53,6 @@ var adminServerDefaultConfig = map[string]interface{}{
common.EmailInsecure: false,
common.EmailIdentity: "",
common.ProjectCreationRestriction: common.ProCrtRestrAdmOnly,
common.VerifyRemoteCert: false,
common.MaxJobWorkers: 3,
common.TokenExpiration: 30,
common.CfgExpiration: 5,

View File

@ -74,15 +74,6 @@ func initKeyProvider() {
keyProvider = comcfg.NewFileKeyProvider(path)
}
// VerifyRemoteCert returns bool value.
func VerifyRemoteCert() (bool, error) {
cfg, err := mg.Get()
if err != nil {
return true, err
}
return cfg[common.VerifyRemoteCert].(bool), nil
}
// Database ...
func Database() (*models.Database, error) {
cfg, err := mg.Get()

View File

@ -49,10 +49,6 @@ func TestConfig(t *testing.T) {
t.Fatalf("failed to initialize configurations: %v", err)
}
if _, err := VerifyRemoteCert(); err != nil {
t.Fatalf("failed to get verify remote cert: %v", err)
}
if _, err := Database(); err != nil {
t.Fatalf("failed to get database settings: %v", err)
}

View File

@ -106,7 +106,7 @@ func TestRepJob(t *testing.T) {
j, err := dao.GetRepJob(repJobID)
assert.Equal(models.JobRetrying, j.Status)
assert.Equal(1, rj.parm.Enabled)
assert.True(rj.parm.Insecure)
assert.False(rj.parm.Insecure)
rj2 := NewRepJob(99999)
err = rj2.Init()
assert.NotNil(err)

View File

@ -120,17 +120,12 @@ func (rj *RepJob) Init() error {
if err != nil {
return err
}
verify, err := config.VerifyRemoteCert()
if err != nil {
return err
}
rj.parm = &RepJobParm{
LocalRegURL: regURL,
Repository: job.Repository,
Tags: job.TagList,
Enabled: policy.Enabled,
Operation: job.Operation,
Insecure: !verify,
}
if policy.Enabled == 0 {
//worker will cancel this job
@ -159,6 +154,7 @@ func (rj *RepJob) Init() error {
}
rj.parm.TargetPassword = pwd
rj.parm.Insecure = target.Insecure
return nil
}

View File

@ -49,7 +49,6 @@ var (
common.EmailIdentity,
common.EmailInsecure,
common.ProjectCreationRestriction,
common.VerifyRemoteCert,
common.TokenExpiration,
common.ScanAllPolicy,
}
@ -81,7 +80,6 @@ var (
common.EmailSSL,
common.EmailInsecure,
common.SelfRegistration,
common.VerifyRemoteCert,
}
passwordKeys = []string{

View File

@ -61,7 +61,7 @@ func TestPutConfig(t *testing.T) {
apiTest := newHarborAPI()
cfg := map[string]interface{}{
common.VerifyRemoteCert: false,
common.TokenExpiration: 60,
}
code, err := apiTest.PutConfig(*admin, cfg)
@ -104,13 +104,13 @@ func TestResetConfig(t *testing.T) {
return
}
value, ok := cfgs[common.VerifyRemoteCert]
value, ok := cfgs[common.TokenExpiration]
if !ok {
t.Errorf("%s not found", common.VerifyRemoteCert)
t.Errorf("%s not found", common.TokenExpiration)
return
}
assert.Equal(value.Value.(bool), true, "unexpected value")
assert.Equal(int(value.Value.(float64)), 30, "unexpected 30")
ccc, err := config.GetSystemCfg()
if err != nil {

View File

@ -58,13 +58,8 @@ func (t *TargetAPI) Prepare() {
}
}
func (t *TargetAPI) ping(endpoint, username, password string) {
verify, err := config.VerifyRemoteCert()
if err != nil {
log.Errorf("failed to check whether insecure or not: %v", err)
t.CustomAbort(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
}
registry, err := newRegistryClient(endpoint, !verify, username, password)
func (t *TargetAPI) ping(endpoint, username, password string, insecure bool) {
registry, err := newRegistryClient(endpoint, insecure, username, password)
if err != nil {
// timeout, dns resolve error, connection refused, etc.
if urlErr, ok := err.(*url.Error); ok {
@ -105,6 +100,7 @@ func (t *TargetAPI) PingByID() {
endpoint := target.URL
username := target.Username
password := target.Password
insecure := target.Insecure
if len(password) != 0 {
password, err = utils.ReversibleDecrypt(password, t.secretKey)
if err != nil {
@ -112,7 +108,7 @@ func (t *TargetAPI) PingByID() {
t.CustomAbort(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
}
}
t.ping(endpoint, username, password)
t.ping(endpoint, username, password, insecure)
}
// Ping validates whether the target is reachable and whether the credential is valid
@ -121,6 +117,7 @@ func (t *TargetAPI) Ping() {
Endpoint string `json:"endpoint"`
Username string `json:"username"`
Password string `json:"password"`
Insecure bool `json:"insecure"`
}{}
t.DecodeJSONReq(&req)
@ -128,7 +125,7 @@ func (t *TargetAPI) Ping() {
t.CustomAbort(http.StatusBadRequest, "endpoint is required")
}
t.ping(req.Endpoint, req.Username, req.Password)
t.ping(req.Endpoint, req.Username, req.Password, req.Insecure)
}
// Get ...
@ -255,6 +252,7 @@ func (t *TargetAPI) Put() {
Endpoint *string `json:"endpoint"`
Username *string `json:"username"`
Password *string `json:"password"`
Insecure *bool `json:"insecure"`
}{}
t.DecodeJSONReq(&req)
@ -273,6 +271,9 @@ func (t *TargetAPI) Put() {
if req.Password != nil {
target.Password = *req.Password
}
if req.Insecure != nil {
target.Insecure = *req.Insecure
}
t.Validate(target)

View File

@ -276,15 +276,6 @@ func OnlyAdminCreateProject() (bool, error) {
return cfg[common.ProjectCreationRestriction].(string) == common.ProCrtRestrAdmOnly, nil
}
// VerifyRemoteCert returns bool value.
func VerifyRemoteCert() (bool, error) {
cfg, err := mg.Get()
if err != nil {
return true, err
}
return cfg[common.VerifyRemoteCert].(bool), nil
}
// Email returns email server settings
func Email() (*models.Email, error) {
cfg, err := mg.Get()

View File

@ -108,10 +108,6 @@ func TestConfig(t *testing.T) {
t.Fatalf("failed to get onldy admin create project: %v", err)
}
if _, err := VerifyRemoteCert(); err != nil {
t.Fatalf("failed to get verify remote cert: %v", err)
}
if _, err := Email(); err != nil {
t.Fatalf("failed to get email settings: %v", err)
}

View File

@ -38,6 +38,10 @@ export const CREATE_EDIT_ENDPOINT_TEMPLATE: string = `
<label for="destination_password" class="col-md-4 form-group-label-override">{{ 'DESTINATION.PASSWORD' | translate }}</label>
<input type="password" class="col-md-8" id="destination_password" [disabled]="testOngoing" [readonly]="!editable" [(ngModel)]="target.password" size="20" name="password" #password="ngModel" (focus)="clearPassword($event)">
</div>
<div class="form-group">
<label for="destination_insecure" class="col-md-4 form-group-label-override">{{'CONFIG.VERIFY_REMOTE_CERT' | translate }}</label>
<clr-checkbox #insecure class="col-md-8" name="insecure" id="destination_insecure" [(ngModel)]="target.insecure"></clr-checkbox>
</div>
<div class="form-group">
<label for="spin" class="col-md-4"></label>
<span class="col-md-8 spinner spinner-inline" [hidden]="!inProgress"></span>

View File

@ -21,6 +21,7 @@ describe('CreateEditEndpointComponent (inline template)', () => {
"name": "target_01",
"username": "admin",
"password": "",
"insecure": false,
"type": 0
};

View File

@ -91,7 +91,8 @@ export class CreateEditEndpointComponent implements AfterViewChecked, OnDestroy
(this.target.endpoint && this.target.endpoint.trim() !== "") ||
(this.target.name && this.target.name.trim() !== "") ||
(this.target.username && this.target.username.trim() !== "") ||
(this.target.password && this.target.password.trim() !== ""));
(this.target.password && this.target.password.trim() !== "")) ||
this.target.insecure;
} else {
//Edit
return !compareValue(this.target, this.initVal);
@ -104,26 +105,29 @@ export class CreateEditEndpointComponent implements AfterViewChecked, OnDestroy
this.targetForm &&
this.targetForm.valid &&
this.editable &&
(this.targetNameHasChanged || this.endpointHasChanged);
(this.targetNameHasChanged || this.endpointHasChanged || this.checkboxHasChanged);
}
public get inProgress(): boolean {
return this.onGoing || this.testOngoing;
}
public get checkboxHasChanged(): boolean {
return (this.target.insecure !== this.initVal.insecure) ? true : false;
}
ngOnDestroy(): void {
if (this.valueChangesSub) {
this.valueChangesSub.unsubscribe();
}
}
initEndpoint(): Endpoint {
return {
endpoint: "",
name: "",
username: "",
password: "",
insecure: false,
type: 0
};
}
@ -275,20 +279,28 @@ export class CreateEditEndpointComponent implements AfterViewChecked, OnDestroy
if (this.onGoing) {
return;//Avoid duplicated submitting
}
if (!(this.targetNameHasChanged || this.endpointHasChanged)) {
if (!(this.targetNameHasChanged || this.endpointHasChanged || this.checkboxHasChanged)) {
return;//Avoid invalid submitting
}
let payload: Endpoint = this.initEndpoint();
if (this.targetNameHasChanged) {
payload.name = this.target.name;
delete payload.endpoint;
}else {
delete payload.name;
}
if (this.endpointHasChanged) {
payload.endpoint = this.target.endpoint;
payload.username = this.target.username;
payload.password = this.target.password;
delete payload.name;
}else {
delete payload.endpoint;
}
if (this.checkboxHasChanged) {
payload.insecure = this.target.insecure;
}else {
delete payload.insecure;
}
if (!this.target.id) { return; }
this.onGoing = true;
@ -317,7 +329,7 @@ export class CreateEditEndpointComponent implements AfterViewChecked, OnDestroy
handleErrorMessageKey(status: number): string {
switch (status) {
case 409: this
case 409:
return 'DESTINATION.CONFLICT_NAME';
case 400:
return 'DESTINATION.INVALID_NAME';
@ -356,7 +368,7 @@ export class CreateEditEndpointComponent implements AfterViewChecked, OnDestroy
keyNumber++;
}
}
if (keyNumber !== 4) {
if (keyNumber !== 5) {
return;
}

View File

@ -110,6 +110,7 @@ describe('CreateEditRuleComponent (inline template)', ()=>{
"name": "target_01",
"username": "admin",
"password": "",
"insecure": false,
"type": 0
},
{
@ -118,6 +119,7 @@ describe('CreateEditRuleComponent (inline template)', ()=>{
"name": "target_02",
"username": "AAA",
"password": "",
"insecure": false,
"type": 0
},
{
@ -126,6 +128,7 @@ describe('CreateEditRuleComponent (inline template)', ()=>{
"name": "target_03",
"username": "admin",
"password": "",
"insecure": false,
"type": 0
},
{
@ -134,6 +137,7 @@ describe('CreateEditRuleComponent (inline template)', ()=>{
"name": "target_04",
"username": "",
"password": "",
"insecure": true,
"type": 0
}
];

View File

@ -123,6 +123,7 @@ export class CreateEditRuleComponent implements AfterViewChecked {
name: '',
username: '',
password: '',
insecure: false,
type: 0
};
}

View File

@ -19,6 +19,7 @@ export const ENDPOINT_TEMPLATE: string = `
<clr-datagrid [clrDgLoading]="loading">
<clr-dg-column [clrDgField]="'name'">{{'DESTINATION.NAME' | translate}}</clr-dg-column>
<clr-dg-column [clrDgField]="'endpoint'">{{'DESTINATION.URL' | translate}}</clr-dg-column>
<clr-dg-column [clrDgField]="'insecure'">{{'CONFIG.VERIFY_REMOTE_CERT' | translate }}</clr-dg-column>
<clr-dg-column [clrDgSortBy]="creationTimeComparator">{{'DESTINATION.CREATION_TIME' | translate}}</clr-dg-column>
<clr-dg-placeholder>{{'DESTINATION.PLACEHOLDER' | translate }}</clr-dg-placeholder>
<clr-dg-row *clrDgItems="let t of targets" [clrDgItem]='t'>
@ -28,6 +29,9 @@ export const ENDPOINT_TEMPLATE: string = `
</clr-dg-action-overflow>
<clr-dg-cell>{{t.name}}</clr-dg-cell>
<clr-dg-cell>{{t.endpoint}}</clr-dg-cell>
<clr-dg-cell>
<clr-checkbox name="insecure" [clrChecked]="t.insecure"> </clr-checkbox>
</clr-dg-cell>
<clr-dg-cell>{{t.creation_time | date: 'short'}}</clr-dg-cell>
</clr-dg-row>
<clr-dg-footer>

View File

@ -25,6 +25,7 @@ describe('EndpointComponent (inline template)', () => {
"name": "target_01",
"username": "admin",
"password": "",
"insecure": true,
"type": 0
},
{
@ -33,6 +34,7 @@ describe('EndpointComponent (inline template)', () => {
"name": "target_02",
"username": "AAA",
"password": "",
"insecure": false,
"type": 0
},
{
@ -41,6 +43,7 @@ describe('EndpointComponent (inline template)', () => {
"name": "target_03",
"username": "admin",
"password": "",
"insecure": false,
"type": 0
},
{
@ -49,6 +52,7 @@ describe('EndpointComponent (inline template)', () => {
"name": "target_04",
"username": "",
"password": "",
"insecure": false,
"type": 0
}
];
@ -59,6 +63,7 @@ describe('EndpointComponent (inline template)', () => {
"name": "target_01",
"username": "admin",
"password": "",
"insecure": false,
"type": 0
};

View File

@ -69,6 +69,7 @@ export class EndpointComponent implements OnInit {
name: "",
username: "",
password: "",
insecure: false,
type: 0
};
}

View File

@ -107,6 +107,7 @@ describe('Replication Component (inline template)', ()=>{
"name": "target_01",
"username": "admin",
"password": "",
"insecure": false,
"type": 0
},
{
@ -115,6 +116,7 @@ describe('Replication Component (inline template)', ()=>{
"name": "target_02",
"username": "AAA",
"password": "",
"insecure": false,
"type": 0
},
{
@ -123,6 +125,7 @@ describe('Replication Component (inline template)', ()=>{
"name": "target_03",
"username": "admin",
"password": "",
"insecure": false,
"type": 0
},
{
@ -131,6 +134,7 @@ describe('Replication Component (inline template)', ()=>{
"name": "target_04",
"username": "",
"password": "",
"insecure": false,
"type": 0
}
];

View File

@ -73,6 +73,7 @@ export interface Endpoint extends Base {
name: string;
username?: string;
password?: string;
insecure: boolean;
type: number;
}

View File

@ -31,7 +31,7 @@
"clarity-icons": "^0.9.8",
"clarity-ui": "^0.9.8",
"core-js": "^2.4.1",
"harbor-ui": "0.4.83",
"harbor-ui": "0.4.85",
"intl": "^1.2.5",
"mutationobserver-shim": "^0.3.2",
"ngx-cookie": "^1.0.0",

View File

@ -6,9 +6,6 @@
<li role="presentation" class="nav-item">
<button id="config-auth" class="btn btn-link nav-link active" aria-controls="authentication" [class.active]='isCurrentTabLink("config-auth")' type="button" (click)='tabLinkClick("config-auth")'>{{'CONFIG.AUTH' | translate }}</button>
</li>
<li role="presentation" class="nav-item">
<button id="config-replication" class="btn btn-link nav-link" aria-controls="replication" [class.active]='isCurrentTabLink("config-replication")' type="button" (click)='tabLinkClick("config-replication")'>{{'CONFIG.REPLICATION' | translate }}</button>
</li>
<li role="presentation" class="nav-item">
<button id="config-email" class="btn btn-link nav-link" aria-controls="email" [class.active]='isCurrentTabLink("config-email")' type="button" (click)='tabLinkClick("config-email")'>{{'CONFIG.EMAIL' | translate }}</button>
</li>
@ -22,9 +19,6 @@
<section id="authentication" role="tabpanel" aria-labelledby="config-auth" [hidden]='!isCurrentTabContent("authentication")'>
<config-auth [ldapConfig]="allConfig"></config-auth>
</section>
<section id="replication" role="tabpanel" aria-labelledby="config-replication" [hidden]='!isCurrentTabContent("replication")'>
<replication-config [(replicationConfig)]="allConfig"></replication-config>
</section>
<section id="email" role="tabpanel" aria-labelledby="config-email" [hidden]='!isCurrentTabContent("email")'>
<config-email [mailConfig]="allConfig"></config-email>
</section>

View File

@ -30,7 +30,6 @@ import {
Configuration,
StringValueItem,
ComplexValueItem,
ReplicationConfigComponent,
SystemSettingsComponent,
VulnerabilityConfigComponent,
ClairDBStatus
@ -59,7 +58,6 @@ export class ConfigurationComponent implements OnInit, OnDestroy {
testingMailOnGoing: boolean = false;
testingLDAPOnGoing: boolean = false;
@ViewChild(ReplicationConfigComponent) replicationConfig: ReplicationConfigComponent;
@ViewChild(SystemSettingsComponent) systemSettingsConfig: SystemSettingsComponent;
@ViewChild(VulnerabilityConfigComponent) vulnerabilityConfig: VulnerabilityConfigComponent;
@ViewChild(ConfigurationEmailComponent) mailConfig: ConfigurationEmailComponent;
@ -170,9 +168,7 @@ export class ConfigurationComponent implements OnInit, OnDestroy {
}
public isValid(): boolean {
return this.replicationConfig &&
this.replicationConfig.isValid &&
this.systemSettingsConfig &&
return this.systemSettingsConfig &&
this.systemSettingsConfig.isValid &&
this.mailConfig &&
this.mailConfig.isValid() &&

View File

@ -57,19 +57,25 @@ if [[ $DRONE_BRANCH == "master" || $DRONE_BRANCH == *"refs/tags"* || $DRONE_BRAN
echo "Package Harbor build."
pybot --removekeywords TAG:secret --include Bundle tests/robot-cases/Group0-Distro-Harbor
echo "Running full CI for $DRONE_BUILD_EVENT on $DRONE_BRANCH"
pybot -v ip:$container_ip --removekeywords TAG:secret --include BAT tests/robot-cases/Group0-BAT
upload_latest_build=true
pybot -v ip:$container_ip --removekeywords TAG:secret --include BAT tests/robot-cases/Group0-BAT
elif (echo $buildinfo | grep -q "\[Specific CI="); then
buildtype=$(echo $buildinfo | grep "\[Specific CI=")
testsuite=$(echo $buildtype | awk -v FS="(=|])" '{print $2}')
pybot -v ip:$container_ip --removekeywords TAG:secret --suite $testsuite --suite Regression tests/robot-cases
elif (echo $buildinfo | grep -q "\[Full CI\]"); then
upload_build=true
pybot -v ip:$container_ip --removekeywords TAG:secret --exclude skip tests/robot-cases
elif (echo $buildinfo | grep -q "\[Skip CI\]"); then
echo "Skip CI."
elif (echo $buildinfo | grep -q "\[Upload Build\]"); then
upload_latest_build=true
upload_build=true
echo "Package Harbor build."
pybot --removekeywords TAG:secret --include Bundle tests/robot-cases/Group0-Distro-Harbor
echo "Running full CI for $DRONE_BUILD_EVENT on $DRONE_BRANCH"
pybot -v ip:$container_ip --removekeywords TAG:secret --include BAT tests/robot-cases/Group0-BAT
else
# default mode is BAT.
# default mode is BAT.
pybot -v ip:$container_ip --removekeywords TAG:secret --include BAT tests/robot-cases/Group0-BAT
fi
@ -90,6 +96,13 @@ else
echo "No log output file to upload"
fi
## --------------------------------------------- Upload Harbor Build File ---------------------------------------
if [ $upload_build == true ] && [ $rc -eq 0 ]; then
harbor_build_bundle=$(basename harbor-offline-installer-*.tgz)
gsutil cp $harbor_build_bundle gs://harbor-builds
gsutil -D setacl public-read gs://harbor-builds/$harbor_build_bundle &> /dev/null
fi
## --------------------------------------------- Upload Harbor Latest Build File ---------------------------------------
if [ $upload_latest_build == true ] && [ $rc -eq 0 ]; then
echo "update latest build file."

View File

@ -102,7 +102,7 @@ Project Creation Should Not Display
Switch To System Settings
Sleep 1
Click Element xpath=//clr-main-container//nav//ul/li[3]
Click Element xpath=//config//ul/li[4]
Click Element xpath=//*[@id="config-system"]
Modify Token Expiration
[Arguments] ${minutes}

View File

@ -176,23 +176,6 @@ Test Case - Edit Self-Registration
Enable Self Reg
Close Browser
Test Case - Edit Verify Remote Cert
Init Chrome Driver
Sign In Harbor ${HARBOR_URL} %{HARBOR_ADMIN} %{HARBOR_PASSWORD}
Switch To System Replication
Check Verify Remote Cert
Logout Harbor
Sign In Harbor ${HARBOR_URL} %{HARBOR_ADMIN} %{HARBOR_PASSWORD}
Switch To System Replication
Should Verify Remote Cert Be Enabled
#restore setting
Check Verify Remote Cert
Close Browser
Test Case - Edit Email Settings
Init Chrome Driver
Sign In Harbor ${HARBOR_URL} %{HARBOR_ADMIN} %{HARBOR_PASSWORD}

View File

@ -1,5 +1,4 @@
#!/bin/sh
set -e
if docker ps --filter "status=restarting" | grep 'vmware'; then

View File

@ -1,16 +1,11 @@
FROM mysql:5.6
FROM vmware/mariadb-photon:10.2.8
MAINTAINER bhe@vmware.com
RUN sed -i -e 's/us.archive.ubuntu.com/archive.ubuntu.com/g' /etc/apt/sources.list
RUN apt-get update
RUN apt-get install -y curl python python-pip git python-mysqldb
RUN pip install alembic
RUN mkdir -p /harbor-migration
RUN tdnf distro-sync || echo \
&& tdnf install -y mariadb-devel python2 python2-devel python-pip gcc\
linux-api-headers glibc-devel binutils zlib-devel openssl-devel \
&& pip install mysqlclient alembic \
&& tdnf clean all \
&& mkdir -p /harbor-migration
WORKDIR /harbor-migration

View File

@ -30,7 +30,7 @@ script_location = migration_harbor
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = mysql://$DB_USR:$DB_PWD@localhost:3306/registry
sqlalchemy.url = mysql://$DB_USR:$DB_PWD@localhost:3306/registry?unix_socket=/var/run/mysqld/mysqld.sock
# Logging configuration
[loggers]

View File

@ -54,4 +54,5 @@ Changelog for harbor database schema
- create table `project_metadata`
- insert data into table `project_metadata`
- delete column `public` from table `project`
- delete column `public` from table `project`
- add column `insecure` to table `replication_target`

View File

@ -8,8 +8,6 @@ fi
source ./alembic.tpl > ./alembic.ini
WAITTIME=60
DBCNF="-hlocalhost -u${DB_USR}"
#prevent shell to print insecure message
@ -44,24 +42,21 @@ fi
echo 'Trying to start mysql server...'
DBRUN=0
mysqld &
for i in $(seq 1 $WAITTIME); do
echo "$(/usr/sbin/service mysql status)"
if [[ "$(/usr/sbin/service mysql status)" =~ "not running" ]]; then
sleep 1
else
DBRUN=1
for i in {60..0}; do
mysqladmin -u$DB_USR -p$DB_PWD processlist >/dev/null 2>&1
if [ $? = 0 ]; then
break
fi
echo 'Waiting for MySQL start...'
sleep 1
done
if [[ $DBRUN -eq 0 ]]; then
if [ "$i" = 0 ]; then
echo "timeout. Can't run mysql server."
if [[ $1 = "test" ]]; then
echo "test failed."
fi
exit 1
fi
if [[ $1 = "test" ]]; then
echo "test passed."
exit 0