Merge remote-tracking branch 'upstream/master' into labelFilter

pfh 2018-05-02 14:14:15 +08:00
commit 08f750b04d
114 changed files with 1058 additions and 667 deletions

View File

@ -13,11 +13,11 @@ services:
dist: trusty
env:
MYSQL_HOST: localhost
MYSQL_PORT: 3306
MYSQL_USR: root
MYSQL_PWD: root123
MYSQL_DATABASE: registry
POSTGRESQL_HOST: localhost
POSTGRESQL_PORT: 5432
POSTGRESQL_USR: postgres
POSTGRESQL_PWD: root123
POSTGRESQL_DATABASE: registry
SQLITE_FILE: /tmp/registry.db
ADMINSERVER_URL: http://127.0.0.1:8888
DOCKER_COMPOSE_VERSION: 1.7.1
@ -53,7 +53,7 @@ install:
# - mysql --version
- go get -d github.com/docker/distribution
- go get -d github.com/docker/libtrust
- go get -d github.com/go-sql-driver/mysql
- go get -d github.com/lib/pq
- go get github.com/golang/lint/golint
- go get github.com/GeertJohan/fgt
@ -80,12 +80,11 @@ script:
- sudo mv ./tests/ca.crt /etc/ui/ca/
- sudo mkdir -p /harbor
- sudo mv ./VERSION /harbor/UIVERSION
- sudo service mysql stop
- sudo service postgresql stop
- sudo make run_clarity_ut CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.4.0
- cat ./src/ui_ng/lib/npm-ut-test-results
- sudo ./tests/testprepare.sh
- sudo make -f make/photon/Makefile -e MARIADBVERSION=10.2.10 -e VERSIONTAG=dev
- sudo make -f make/photon/Makefile _build_registry -e REGISTRYVERSION=v2.6.2 -e VERSIONTAG=dev
- sudo make -f make/photon/Makefile _build_postgresql _build_db _build_registry -e VERSIONTAG=dev -e CLAIRDBVERSION=dev -e REGISTRYVERSION=v2.6.2
- sudo sed -i 's/__reg_version__/v2.6.2-dev/g' ./make/docker-compose.test.yml
- sudo sed -i 's/__version__/dev/g' ./make/docker-compose.test.yml
- sudo mkdir -p ./make/common/config/registry/
@ -93,7 +92,7 @@ script:
- sudo docker-compose -f ./make/docker-compose.test.yml up -d
- go list ./... | grep -v -E 'vendor|tests|test' | xargs -L1 fgt golint
- go list ./... | grep -v -E 'vendor|tests|test' | xargs -L1 go vet
- export MYSQL_HOST=$IP
- export POSTGRESQL_HOST=$IP
- export REGISTRY_URL=$IP:5000
- echo $REGISTRY_URL
- ./tests/pushimage.sh

View File

@ -226,14 +226,13 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
ifeq ($(NOTARYFLAG), true)
DOCKERSAVE_PARA+= vmware/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) vmware/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG) \
vmware/mariadb-photon:$(MARIADBVERSION)
DOCKERSAVE_PARA+= vmware/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) vmware/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
endif
ifeq ($(CLAIRFLAG), true)
DOCKERSAVE_PARA+= vmware/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG) vmware/postgresql-photon:$(CLAIRDBVERSION)
DOCKERSAVE_PARA+= vmware/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
@ -292,6 +291,7 @@ modify_composefile: modify_composefile_notary modify_composefile_clair
@cp $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
@$(SEDCMD) -i 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)

View File

@ -54,7 +54,6 @@ data:
CLAIR_DB_PORT: "5432"
CLAIR_DB: "{{ .Values.clair.postgresDatabase }}"
CLAIR_DB_USERNAME: "{{ .Values.clair.postgresUser }}"
CLAIR_DB_PASSWORD: "{{ .Values.clair.postgresPassword }}"
UAA_ENDPOINT: ""
UAA_CLIENTID: ""
UAA_CLIENTSECRET: ""

View File

@ -13,7 +13,9 @@ data:
MYSQL_PWD: {{ .Values.mysql.pass | b64enc | quote }}
JOBSERVICE_SECRET: {{ .Values.jobservice.secret | b64enc | quote }}
UI_SECRET: {{ .Values.ui.secret | b64enc | quote }}
{{- if eq .Values.adminserver.authenticationMode "ldap_auth" }}
LDAP_SEARCH_PWD: {{ .Values.adminserver.ldap.searchPwd | b64enc | quote }}
{{- end }}
{{ if .Values.clair.enabled }}
CLAIR_DB_PASSWORD: {{ .Values.clair.postgresPassword | b64enc | quote }}
{{ end }}
#LDAP_SEARCH_PWD: not-a-secure-password

View File

@ -72,6 +72,7 @@ adminserver:
ldap:
url: "ldaps://ldapserver"
searchDN: ""
searchPassword: ""
baseDN: ""
filter: "(objectClass=person)"
uid: "uid"

View File

@ -513,7 +513,7 @@ paths:
'403':
description: User in session does not have permission to the project.
'404':
description: Project does not exist.
description: Project does not exist, or the username is not found, or the user group is not found.
'500':
description: Unexpected internal errors.
'/projects/{project_id}/members/{mid}':

View File

@ -12,16 +12,16 @@ LDAP_UID=$ldap_uid
LDAP_SCOPE=$ldap_scope
LDAP_TIMEOUT=$ldap_timeout
LDAP_VERIFY_CERT=$ldap_verify_cert
DATABASE_TYPE=postgresql
POSTGRESQL_HOST=$db_host
POSTGRESQL_PORT=$db_port
POSTGRESQL_USERNAME=$db_user
POSTGRESQL_PASSWORD=$db_password
POSTGRESQL_DATABASE=registry
LDAP_GROUP_BASEDN=$ldap_group_basedn
LDAP_GROUP_FILTER=$ldap_group_filter
LDAP_GROUP_GID=$ldap_group_gid
LDAP_GROUP_SCOPE=$ldap_group_scope
DATABASE_TYPE=mysql
MYSQL_HOST=$db_host
MYSQL_PORT=$db_port
MYSQL_USR=$db_user
MYSQL_PWD=$db_password
MYSQL_DATABASE=registry
REGISTRY_URL=$registry_url
TOKEN_SERVICE_URL=$token_service_url
EMAIL_HOST=$email_host

View File

@ -1 +1 @@
MYSQL_ROOT_PASSWORD=$db_password
POSTGRES_PASSWORD=$db_password

View File

@ -1,7 +0,0 @@
CREATE DATABASE IF NOT EXISTS `notaryserver`;
CREATE USER "server"@"notary-server.%" IDENTIFIED BY "";
GRANT
ALL PRIVILEGES ON `notaryserver`.*
TO "server"@"notary-server.%"

View File

@ -1,7 +0,0 @@
CREATE DATABASE IF NOT EXISTS `notarysigner`;
CREATE USER "signer"@"notary-signer.%" IDENTIFIED BY "";
GRANT
ALL PRIVILEGES ON `notarysigner`.*
TO "signer"@"notary-signer.%";

View File

@ -0,0 +1,28 @@
{
"server": {
"http_addr": ":4443"
},
"trust_service": {
"type": "remote",
"hostname": "notarysigner",
"port": "7899",
"tls_ca_file": "./notary-signer-ca.crt",
"key_algorithm": "ecdsa"
},
"logging": {
"level": "debug"
},
"storage": {
"backend": "postgres",
"db_url": "postgres://server:password@postgresql:5432/notaryserver?sslmode=disable"
},
"auth": {
"type": "token",
"options": {
"realm": "$token_endpoint/service/token",
"service": "harbor-notary",
"issuer": "harbor-token-issuer",
"rootcertbundle": "/etc/notary/root.crt"
}
}
}

View File

@ -0,0 +1,2 @@
MIGRATIONS_PATH=migrations/server/postgresql
DB_URL=postgres://server:password@postgresql:5432/notaryserver?sslmode=disable

View File

@ -0,0 +1,15 @@
{
"server": {
"grpc_addr": ":7899",
"tls_cert_file": "./notary-signer.crt",
"tls_key_file": "./notary-signer.key"
},
"logging": {
"level": "debug"
},
"storage": {
"backend": "postgres",
"db_url": "postgres://signer:password@postgresql:5432/notarysigner?sslmode=disable",
"default_alias":"defaultalias"
}
}

View File

@ -1,2 +1,3 @@
NOTARY_SIGNER_DEFAULTALIAS=$alias
MIGRATIONS_PATH=migrations/signer/postgresql
DB_URL=postgres://signer:password@postgresql:5432/notarysigner?sslmode=disable

View File

@ -11,26 +11,11 @@ services:
registry:
networks:
- harbor-clair
postgres:
postgresql:
networks:
harbor-clair:
aliases:
- postgres
container_name: clair-db
image: vmware/postgresql-photon:__postgresql_version__
restart: always
depends_on:
- log
env_file:
./common/config/clair/postgres_env
volumes:
- ./common/config/clair/postgresql-init.d/:/docker-entrypoint-initdb.d:z
- /data/clair-db:/var/lib/postgresql/data:z
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "clair-db"
- harbor-db
clair:
networks:
- harbor-clair
@ -39,7 +24,7 @@ services:
restart: always
cpu_quota: 150000
depends_on:
- postgres
- postgresql
volumes:
- ./common/config/clair/config.yaml:/etc/clair/config.yaml:z
logging:

View File

@ -6,18 +6,24 @@ services:
proxy:
networks:
- harbor-notary
postgresql:
networks:
harbor-notary:
aliases:
- harbor-db
notary-server:
image: vmware/notary-server-photon:__notary_version__
container_name: notary-server
restart: always
networks:
- notary-mdb
- notary-sig
- harbor-notary
volumes:
- ./common/config/notary:/etc/notary:z
env_file:
- ./common/config/notary/server_env
depends_on:
- notary-db
- postgresql
- notary-signer
logging:
driver: "syslog"
@ -29,7 +35,7 @@ services:
container_name: notary-signer
restart: always
networks:
notary-mdb:
harbor-notary:
notary-sig:
aliases:
- notarysigner
@ -38,38 +44,14 @@ services:
env_file:
- ./common/config/notary/signer_env
depends_on:
- notary-db
- postgresql
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "notary-signer"
notary-db:
image: vmware/mariadb-photon:__mariadb_version__
container_name: notary-db
restart: always
networks:
notary-mdb:
aliases:
- mysql
volumes:
- ./common/config/notary/mysql-initdb.d:/docker-entrypoint-initdb.d:z
- /data/notary-db:/var/lib/mysql:z
environment:
- TERM=dumb
- MYSQL_ALLOW_EMPTY_PASSWORD="true"
command: mysqld --innodb_file_per_table
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "notary-db"
networks:
harbor-notary:
external: false
notary-mdb:
external: false
notary-sig:
external: false

View File

@ -31,12 +31,12 @@ services:
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "registry"
mysql:
postgresql:
image: vmware/harbor-db:__version__
container_name: harbor-db
restart: always
volumes:
- /data/database:/var/lib/mysql:z
- /data/database:/var/lib/postgresql/data:z
networks:
- harbor
env_file:
@ -47,7 +47,7 @@ services:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "mysql"
tag: "postgresql"
adminserver:
image: vmware/harbor-adminserver:__version__
container_name: harbor-adminserver
@ -119,6 +119,13 @@ services:
- /data/redis:/data
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://127.0.0.1:1514"
tag: "redis"
proxy:
image: vmware/nginx-photon:__nginx_version__
container_name: nginx
@ -132,7 +139,7 @@ services:
- 443:443
- 4443:4443
depends_on:
- mysql
- postgresql
- registry
- ui
- log

View File

@ -127,16 +127,16 @@ project_creation_restriction = everyone
#######Harbor DB configuration section#######
#The address of the Harbor database. Only need to change when using external db.
db_host = mysql
db_host = postgresql
#The password for the root user of Harbor DB. Change this before any production use.
db_password = root123
#The port of Harbor database host
db_port = 3306
db_port = 5432
#The user name of Harbor database
db_user = root
db_user = postgres
##### End of Harbor DB configuration#######
@ -147,11 +147,11 @@ redis_url = redis:6379
##########Clair DB configuration############
#Clair DB host address. Only change it when using an external DB.
clair_db_host = postgres
clair_db_host = postgresql
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
#Please update it before deployment. Subsequent updates will leave Clair's API server and Harbor unable to access Clair's database.
clair_db_password = password
clair_db_password = root123
#Clair DB connect port
clair_db_port = 5432

View File

@ -50,7 +50,7 @@ DOCKERFILEPATH_LOG=$(DOCKERFILEPATH)/log
DOCKERFILENAME_LOG=Dockerfile
DOCKERIMAGENAME_LOG=vmware/harbor-log
DOCKERFILEPATH_DB=$(DOCKERFILEPATH)/db
DOCKERFILEPATH_DB=$(DOCKERFILEPATH)/db/postgresql
DOCKERFILENAME_DB=Dockerfile
DOCKERIMAGENAME_DB=vmware/harbor-db
@ -84,13 +84,13 @@ DOCKERFILEPATH_REDIS=$(DOCKERFILEPATH)/redis
DOCKERFILENAME_REDIS=Dockerfile
DOCKERIMAGENAME_REDIS=vmware/redis-photon
_build_db: _build_mariadb
_build_db:
@echo "modify the db dockerfile..."
@$(SEDCMD) -i 's/__version__/$(MARIADBVERSION)/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
@$(SEDCMD) -i 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
@echo "building db container for photon..."
@cd $(DOCKERFILEPATH_DB) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB) -t $(DOCKERIMAGENAME_DB):$(VERSIONTAG) .
@echo "Done."
@$(SEDCMD) -i 's/$(MARIADBVERSION)/__version__/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
@$(SEDCMD) -i 's/$(VERSIONTAG)/__postgresql_version__/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
_build_adminiserver:
@echo "building adminserver container for photon..."
@ -142,7 +142,7 @@ _build_notary:
rm -rf $(DOCKERFILEPATH_NOTARY)/binary && mkdir -p $(DOCKERFILEPATH_NOTARY)/binary && \
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-signer, $(DOCKERFILEPATH_NOTARY)/binary/notary-signer) && \
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-server, $(DOCKERFILEPATH_NOTARY)/binary/notary-server) && \
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-migrate.tgz, $(DOCKERFILEPATH_NOTARY)/binary/notary-migrate.tgz); \
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-migrate-postgresql.tgz, $(DOCKERFILEPATH_NOTARY)/binary/notary-migrate.tgz); \
cd $(DOCKERFILEPATH_NOTARY)/binary && tar -zvxf notary-migrate.tgz; \
else \
cd $(DOCKERFILEPATH_NOTARY) && $(DOCKERFILEPATH_NOTARY)/builder_public $(NOTARYVERSION); \

View File

@ -0,0 +1,6 @@
FROM vmware/postgresql-photon:__postgresql_version__
COPY registry.sql /docker-entrypoint-initdb.d/
COPY initial-notaryserver.sql /docker-entrypoint-initdb.d/
COPY initial-notarysigner.sql /docker-entrypoint-initdb.d/

View File

@ -0,0 +1,21 @@
#!/bin/bash
set -eo pipefail
host="$(hostname -i || echo '127.0.0.1')"
user="${POSTGRES_USER:-postgres}"
db="${POSTGRES_DB:-$POSTGRES_USER}"
export PGPASSWORD="${POSTGRES_PASSWORD:-}"
args=(
# force postgres to not use the local unix socket (test "external" connectivity)
--host "$host"
--username "$user"
--dbname "$db"
--quiet --no-align --tuples-only
)
if select="$(echo 'SELECT 1' | psql "${args[@]}")" && [ "$select" = '1' ]; then
exit 0
fi
exit 1

View File

@ -0,0 +1,4 @@
CREATE DATABASE notaryserver;
CREATE USER server;
alter user server with encrypted password 'password';
GRANT ALL PRIVILEGES ON DATABASE notaryserver TO server;

View File

@ -0,0 +1,4 @@
CREATE DATABASE notarysigner;
CREATE USER signer;
alter user signer with encrypted password 'password';
GRANT ALL PRIVILEGES ON DATABASE notarysigner TO signer;

View File

@ -0,0 +1,348 @@
CREATE DATABASE registry ENCODING 'UTF8';
\c registry;
create table access (
access_id SERIAL PRIMARY KEY NOT NULL,
access_code char(1),
comment varchar (30)
);
insert into access (access_code, comment) values
('M', 'Management access for project'),
('R', 'Read access for project'),
('W', 'Write access for project'),
('D', 'Delete access for project'),
('S', 'Search access for project');
create table role (
role_id SERIAL PRIMARY KEY NOT NULL,
role_mask int DEFAULT 0 NOT NULL,
role_code varchar(20),
name varchar (20)
);
/*
role mask is reserved for a future enhancement where a project member can have multiple roles;
currently it is set to 0
*/
insert into role (role_code, name) values
('MDRWS', 'projectAdmin'),
('RWS', 'developer'),
('RS', 'guest');
create table harbor_user (
user_id SERIAL PRIMARY KEY NOT NULL,
username varchar(255),
email varchar(255),
password varchar(40) NOT NULL,
realname varchar (255) NOT NULL,
comment varchar (30),
deleted boolean DEFAULT false NOT NULL,
reset_uuid varchar(40) DEFAULT NULL,
salt varchar(40) DEFAULT NULL,
sysadmin_flag boolean DEFAULT false NOT NULL,
creation_time timestamp(0),
update_time timestamp(0),
UNIQUE (username),
UNIQUE (email)
);
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
create table project (
project_id SERIAL PRIMARY KEY NOT NULL,
owner_id int NOT NULL,
/*
The max length of name controlled by API is 30,
and 11 is reserved for marking the deleted project.
*/
name varchar (255) NOT NULL,
creation_time timestamp,
update_time timestamp,
deleted boolean DEFAULT false NOT NULL,
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
UNIQUE (name)
);
insert into project (owner_id, name, creation_time, update_time) values
(1, 'library', NOW(), NOW());
create table project_member (
id SERIAL NOT NULL,
project_id int NOT NULL,
entity_id int NOT NULL,
/*
entity_type indicates the type of member,
u for user, g for user group
*/
entity_type char(1) NOT NULL,
role int NOT NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id),
CONSTRAINT unique_project_entity_type UNIQUE (project_id, entity_id, entity_type)
);
CREATE FUNCTION update_update_time_at_column() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
NEW.update_time = NOW();
RETURN NEW;
END;
$$;
CREATE TRIGGER project_member_update_time_at_modtime BEFORE UPDATE ON project_member FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
insert into project_member (project_id, entity_id, role, entity_type) values
(1, 1, 1, 'u');
create table project_metadata (
id SERIAL NOT NULL,
project_id int NOT NULL,
name varchar(255) NOT NULL,
value varchar(255),
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
deleted boolean DEFAULT false NOT NULL,
PRIMARY KEY (id),
CONSTRAINT unique_project_id_and_name UNIQUE (project_id,name),
FOREIGN KEY (project_id) REFERENCES project(project_id)
);
CREATE TRIGGER project_metadata_update_time_at_modtime BEFORE UPDATE ON project_metadata FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
insert into project_metadata (project_id, name, value, creation_time, update_time, deleted) values
(1, 'public', 'true', NOW(), NOW(), false);
create table user_group (
id SERIAL NOT NULL,
group_name varchar(255) NOT NULL,
group_type smallint default 0,
ldap_group_dn varchar(512) NOT NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER user_group_update_time_at_modtime BEFORE UPDATE ON user_group FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table access_log (
log_id SERIAL NOT NULL,
username varchar (255) NOT NULL,
project_id int NOT NULL,
repo_name varchar (256),
repo_tag varchar (128),
GUID varchar(64),
operation varchar(20) NOT NULL,
op_time timestamp,
primary key (log_id)
);
CREATE INDEX pid_optime ON access_log (project_id, op_time);
create table repository (
repository_id SERIAL NOT NULL,
name varchar(255) NOT NULL,
project_id int NOT NULL,
description text,
pull_count int DEFAULT 0 NOT NULL,
star_count int DEFAULT 0 NOT NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
primary key (repository_id),
UNIQUE (name)
);
CREATE TRIGGER repository_update_time_at_modtime BEFORE UPDATE ON repository FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_policy (
id SERIAL NOT NULL,
name varchar(256),
project_id int NOT NULL,
target_id int NOT NULL,
enabled boolean NOT NULL DEFAULT true,
description text,
deleted boolean DEFAULT false NOT NULL,
cron_str varchar(256),
filters varchar(1024),
replicate_deletion boolean DEFAULT false NOT NULL,
start_time timestamp NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_policy_update_time_at_modtime BEFORE UPDATE ON replication_policy FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_target (
id SERIAL NOT NULL,
name varchar(64),
url varchar(64),
username varchar(255),
password varchar(128),
/*
target_type indicates the type of target registry,
0 means it's a harbor instance,
1 means it's a regular registry
*/
target_type SMALLINT NOT NULL DEFAULT 0,
insecure boolean NOT NULL DEFAULT false,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_target_update_time_at_modtime BEFORE UPDATE ON replication_target FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_job (
id SERIAL NOT NULL,
status varchar(64) NOT NULL,
policy_id int NOT NULL,
repository varchar(256) NOT NULL,
operation varchar(64) NOT NULL,
tags varchar(16384),
/*
The new job service only records the uuid; for compatibility, both IDs are stored in this table.
*/
job_uuid varchar(64),
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE INDEX policy ON replication_job (policy_id);
CREATE INDEX poid_uptime ON replication_job (policy_id, update_time);
CREATE INDEX poid_status ON replication_job (policy_id, status);
CREATE TRIGGER replication_job_update_time_at_modtime BEFORE UPDATE ON replication_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table replication_immediate_trigger (
id SERIAL NOT NULL,
policy_id int NOT NULL,
namespace varchar(256) NOT NULL,
on_push boolean NOT NULL DEFAULT false,
on_deletion boolean NOT NULL DEFAULT false,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE TRIGGER replication_immediate_trigger_update_time_at_modtime BEFORE UPDATE ON replication_immediate_trigger FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_job (
id SERIAL NOT NULL,
status varchar(64) NOT NULL,
repository varchar(256) NOT NULL,
tag varchar(128) NOT NULL,
digest varchar(128),
/*
The new job service only records the uuid; for compatibility, both IDs are stored in this table.
*/
job_uuid varchar(64),
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY (id)
);
CREATE INDEX idx_status ON img_scan_job (status);
CREATE INDEX idx_digest ON img_scan_job (digest);
CREATE INDEX idx_uuid ON img_scan_job (job_uuid);
CREATE INDEX idx_repository_tag ON img_scan_job (repository,tag);
CREATE TRIGGER img_scan_job_update_time_at_modtime BEFORE UPDATE ON img_scan_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table img_scan_overview (
id SERIAL NOT NULL,
image_digest varchar(128) NOT NULL,
scan_job_id int NOT NULL,
/* 0 indicates none, the higher the number, the more severe the status */
severity int NOT NULL default 0,
/* the json string to store components severity status, currently use a json to be more flexible and avoid creating additional tables. */
components_overview varchar(2048),
/* primary key for querying details, in clair it should be the name of the "top layer" */
details_key varchar(128),
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY(id),
UNIQUE(image_digest)
);
CREATE TRIGGER img_scan_overview_update_time_at_modtime BEFORE UPDATE ON img_scan_overview FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table clair_vuln_timestamp (
id SERIAL NOT NULL,
namespace varchar(128) NOT NULL,
last_update timestamp NOT NULL,
PRIMARY KEY(id),
UNIQUE(namespace)
);
create table properties (
id SERIAL NOT NULL,
k varchar(64) NOT NULL,
v varchar(128) NOT NULL,
PRIMARY KEY(id),
UNIQUE (k)
);
create table harbor_label (
id SERIAL NOT NULL,
name varchar(128) NOT NULL,
description text,
color varchar(16),
/*
's' for system level labels
'u' for user level labels
*/
level char(1) NOT NULL,
/*
'g' for global labels
'p' for project labels
*/
scope char(1) NOT NULL,
project_id int,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY(id),
CONSTRAINT unique_label UNIQUE (name,scope, project_id)
);
CREATE TRIGGER harbor_label_update_time_at_modtime BEFORE UPDATE ON harbor_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
create table harbor_resource_label (
id SERIAL NOT NULL,
label_id int NOT NULL,
/*
the resource_id is the ID of project when the resource_type is p
the resource_id is the ID of repository when the resource_type is r
*/
resource_id int,
/*
the resource_name is the name of image when the resource_type is i
*/
resource_name varchar(256),
/*
'p' for project
'r' for repository
'i' for image
*/
resource_type char(1) NOT NULL,
creation_time timestamp default 'now'::timestamp,
update_time timestamp default 'now'::timestamp,
PRIMARY KEY(id),
CONSTRAINT unique_label_resource UNIQUE (label_id,resource_id, resource_name, resource_type)
);
CREATE TRIGGER harbor_resource_label_update_time_at_modtime BEFORE UPDATE ON harbor_resource_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
CREATE TABLE IF NOT EXISTS alembic_version (
version_num varchar(32) NOT NULL
);
insert into alembic_version values ('1.5.0');

View File

@ -29,7 +29,7 @@ insert into role (role_code, name) values
('RS', 'guest');
create table user (
create table harbor_user (
user_id INTEGER PRIMARY KEY,
/*
The max length of username controlled by API is 20,
@ -56,7 +56,7 @@ create table user (
UNIQUE (email)
);
insert into user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
('admin', 'admin@example.com', '', 'system admin', 'admin user',0, 1, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP),
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, 0, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
@ -80,7 +80,7 @@ create table project (
creation_time timestamp,
update_time timestamp,
deleted tinyint (1) DEFAULT 0 NOT NULL,
FOREIGN KEY (owner_id) REFERENCES user(user_id),
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
UNIQUE (name)
);

View File

@ -1,2 +1,2 @@
#!/bin/sh
sudo -E -u \#10000 sh -c "/usr/bin/env /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.json -logf=logfmt"
sudo -E -u \#10000 sh -c "/usr/bin/env /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y \
RUN tdnf distro-sync -y || echo \
&& tdnf erase vim -y \
&& tdnf install -y shadow sudo \
&& tdnf clean all \

View File

@ -1,2 +1,2 @@
#!/bin/sh
sudo -E -u \#10000 sh -c "/usr/bin/env && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.json -logf=logfmt"
sudo -E -u \#10000 sh -c "/usr/bin/env && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y \
RUN tdnf distro-sync -y || echo \
&& tdnf erase vim -y \
&& tdnf install -y shadow sudo \
&& tdnf clean all \

View File

@ -168,7 +168,7 @@ loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile "/var/log/redis/redis.log"
logfile ""
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.

View File

@ -516,9 +516,9 @@ if args.notary_mode:
notary_config_dir = prep_conf_dir(config_dir, "notary")
notary_temp_dir = os.path.join(templates_dir, "notary")
print("Copying sql file for notary DB")
if os.path.exists(os.path.join(notary_config_dir, "mysql-initdb.d")):
shutil.rmtree(os.path.join(notary_config_dir, "mysql-initdb.d"))
shutil.copytree(os.path.join(notary_temp_dir, "mysql-initdb.d"), os.path.join(notary_config_dir, "mysql-initdb.d"))
# if os.path.exists(os.path.join(notary_config_dir, "postgresql-initdb.d")):
# shutil.rmtree(os.path.join(notary_config_dir, "postgresql-initdb.d"))
# shutil.copytree(os.path.join(notary_temp_dir, "postgresql-initdb.d"), os.path.join(notary_config_dir, "postgresql-initdb.d"))
if customize_crt == 'on' and openssl_installed():
try:
temp_cert_dir = os.path.join(base_dir, "cert_tmp")
@ -553,11 +553,10 @@ if args.notary_mode:
mark_file(os.path.join(notary_config_dir, "notary-signer-ca.crt"))
mark_file(os.path.join(notary_config_dir, "root.crt"))
print("Copying notary signer configuration file")
shutil.copy2(os.path.join(notary_temp_dir, "signer-config.json"), notary_config_dir)
render(os.path.join(notary_temp_dir, "server-config.json"),
os.path.join(notary_config_dir, "server-config.json"),
shutil.copy2(os.path.join(notary_temp_dir, "signer-config.postgres.json"), notary_config_dir)
render(os.path.join(notary_temp_dir, "server-config.postgres.json"),
os.path.join(notary_config_dir, "server-config.postgres.json"),
token_endpoint=public_url)
print("Copying nginx configuration file for notary")
shutil.copy2(os.path.join(templates_dir, "nginx", "notary.upstream.conf"), nginx_conf_d)
render(os.path.join(templates_dir, "nginx", "notary.server.conf"),
@ -567,6 +566,7 @@ if args.notary_mode:
default_alias = get_alias(secretkey_path)
render(os.path.join(notary_temp_dir, "signer_env"), os.path.join(notary_config_dir, "signer_env"), alias = default_alias)
shutil.copy2(os.path.join(notary_temp_dir, "server_env"), notary_config_dir)
if args.clair_mode:
clair_temp_dir = os.path.join(templates_dir, "clair")
@ -601,4 +601,3 @@ if args.ha_mode:
FNULL.close()
print("The configuration files are ready, please use docker-compose to start the service.")

View File

@ -35,10 +35,10 @@ var (
common.LDAPScope: true,
common.LDAPTimeout: true,
common.TokenExpiration: true,
common.MySQLPort: true,
common.MaxJobWorkers: true,
common.CfgExpiration: true,
common.ClairDBPort: true,
common.PostGreSQLPort: true,
}
boolKeys = map[string]bool{
common.WithClair: true,

View File

@ -2,9 +2,10 @@ package database
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common"
"github.com/vmware/harbor/src/common/models"
)
func TestCfgStore_Name(t *testing.T) {
@ -26,7 +27,7 @@ func TestWrapperConfig(t *testing.T) {
Value: "true",
},
{
Key:common.MySQLHost,
Key: common.PostGreSQLHOST,
Value: "192.168.1.210",
},
}
@ -37,9 +38,9 @@ func TestWrapperConfig(t *testing.T) {
withNotary, _ := result[common.WithNotary].(bool)
assert.Equal(t, true, withNotary)
mysqlhost, ok := result[common.MySQLHost].(string)
postgresqlhost, ok := result[common.PostGreSQLHOST].(string)
assert.True(t, ok)
assert.Equal(t, "192.168.1.210", mysqlhost)
assert.Equal(t, "192.168.1.210", postgresqlhost)
expiration, ok := result[common.CfgExpiration].(float64)
@ -49,7 +50,7 @@ func TestWrapperConfig(t *testing.T) {
func TestTranslateConfig(t *testing.T) {
config := map[string]interface{}{}
config[common.MySQLHost]="192.168.1.210"
config[common.PostGreSQLHOST] = "192.168.1.210"
entries, err := TranslateConfig(config)
if err != nil {

View File

@ -47,7 +47,7 @@ var (
attrs = []string{
common.EmailPassword,
common.LDAPSearchPwd,
common.MySQLPassword,
common.PostGreSQLPassword,
common.AdminInitialPassword,
common.ClairDBPassword,
common.UAAClientSecret,
@ -62,15 +62,15 @@ var (
parse: parseStringToBool,
},
common.DatabaseType: "DATABASE_TYPE",
common.MySQLHost: "MYSQL_HOST",
common.MySQLPort: &parser{
env: "MYSQL_PORT",
common.PostGreSQLHOST: "POSTGRESQL_HOST",
common.PostGreSQLPort: &parser{
env: "POSTGRESQL_PORT",
parse: parseStringToInt,
},
common.MySQLUsername: "MYSQL_USR",
common.MySQLPassword: "MYSQL_PWD",
common.MySQLDatabase: "MYSQL_DATABASE",
common.SQLiteFile: "SQLITE_FILE",
common.PostGreSQLUsername: "POSTGRESQL_USERNAME",
common.PostGreSQLPassword: "POSTGRESQL_PASSWORD",
common.PostGreSQLDatabase: "POSTGRESQL_DATABASE",
common.PostGreSQLSSLMode: "POSTGRESQL_SSLMODE",
common.LDAPURL: "LDAP_URL",
common.LDAPSearchDN: "LDAP_SEARCH_DN",
common.LDAPSearchPwd: "LDAP_SEARCH_PWD",
@ -141,7 +141,10 @@ var (
common.ClairDB: "CLAIR_DB",
common.ClairDBUsername: "CLAIR_DB_USERNAME",
common.ClairDBHost: "CLAIR_DB_HOST",
common.ClairDBPort: "CLAIR_DB_PORT",
common.ClairDBPort: &parser{
env: "CLAIR_DB_PORT",
parse: parseStringToInt,
},
common.UAAEndpoint: "UAA_ENDPOINT",
common.UAAClientID: "UAA_CLIENTID",
common.UAAClientSecret: "UAA_CLIENTSECRET",
@ -165,14 +168,15 @@ var (
// every time the system startup
repeatLoadEnvs = map[string]interface{}{
common.ExtEndpoint: "EXT_ENDPOINT",
common.MySQLPassword: "MYSQL_PWD",
common.MySQLHost: "MYSQL_HOST",
common.MySQLUsername: "MYSQL_USR",
common.MySQLDatabase: "MYSQL_DATABASE",
common.MySQLPort: &parser{
env: "MYSQL_PORT",
common.PostGreSQLHOST: "POSTGRESQL_HOST",
common.PostGreSQLPort: &parser{
env: "POSTGRESQL_PORT",
parse: parseStringToInt,
},
common.PostGreSQLUsername: "POSTGRESQL_USERNAME",
common.PostGreSQLPassword: "POSTGRESQL_PASSWORD",
common.PostGreSQLDatabase: "POSTGRESQL_DATABASE",
common.PostGreSQLSSLMode: "POSTGRESQL_SSLMODE",
common.MaxJobWorkers: &parser{
env: "MAX_JOB_WORKERS",
parse: parseStringToInt,
@ -383,16 +387,13 @@ func LoadFromEnv(cfgs map[string]interface{}, all bool) error {
func GetDatabaseFromCfg(cfg map[string]interface{}) *models.Database {
database := &models.Database{}
database.Type = cfg[common.DatabaseType].(string)
mysql := &models.MySQL{}
mysql.Host = cfg[common.MySQLHost].(string)
mysql.Port = int(cfg[common.MySQLPort].(int))
mysql.Username = cfg[common.MySQLUsername].(string)
mysql.Password = cfg[common.MySQLPassword].(string)
mysql.Database = cfg[common.MySQLDatabase].(string)
database.MySQL = mysql
sqlite := &models.SQLite{}
sqlite.File = cfg[common.SQLiteFile].(string)
database.SQLite = sqlite
postgresql := &models.PostGreSQL{}
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
postgresql.Port = int(cfg[common.PostGreSQLPort].(int))
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
database.PostGreSQL = postgresql
return database
}
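
Note: the POSTGRESQL_* values loaded above feed straight into dao.InitDatabase(). A minimal sketch of that wiring, assuming the harbor packages are importable and using the field names shown in this diff (Host, Port, Username, Password, Database); this is illustrative only, not part of the commit:

package main

import (
	"log"
	"os"
	"strconv"

	"github.com/vmware/harbor/src/common/dao"
	"github.com/vmware/harbor/src/common/models"
)

func main() {
	// Assumption: the same env vars the adminserver templates define (POSTGRESQL_HOST, ...).
	port, err := strconv.Atoi(os.Getenv("POSTGRESQL_PORT")) // e.g. 5432
	if err != nil {
		log.Fatal(err)
	}
	database := &models.Database{
		Type: "postgresql",
		PostGreSQL: &models.PostGreSQL{
			Host:     os.Getenv("POSTGRESQL_HOST"),
			Port:     port,
			Username: os.Getenv("POSTGRESQL_USERNAME"),
			Password: os.Getenv("POSTGRESQL_PASSWORD"),
			Database: os.Getenv("POSTGRESQL_DATABASE"),
		},
	}
	// getDatabase() (see the common/dao changes below) routes "" or "postgresql"
	// to NewPQSQL and registers the connection with beego's ORM.
	if err := dao.InitDatabase(database); err != nil {
		log.Fatal(err)
	}
}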

View File

@ -128,18 +128,17 @@ func TestLoadFromEnv(t *testing.T) {
func TestGetDatabaseFromCfg(t *testing.T) {
cfg := map[string]interface{}{
common.DatabaseType: "mysql",
common.MySQLDatabase: "registry",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLPassword: "1234",
common.MySQLUsername: "root",
common.SQLiteFile: "/tmp/sqlite.db",
common.DatabaseType: "postgresql",
common.PostGreSQLDatabase: "registry",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLPassword: "root123",
common.PostGreSQLUsername: "postgres",
}
database := GetDatabaseFromCfg(cfg)
assert.Equal(t, "mysql", database.Type)
assert.Equal(t, "postgresql", database.Type)
}
func TestValidLdapScope(t *testing.T) {

View File

@ -41,12 +41,12 @@ const (
ExtEndpoint = "ext_endpoint"
AUTHMode = "auth_mode"
DatabaseType = "database_type"
MySQLHost = "mysql_host"
MySQLPort = "mysql_port"
MySQLUsername = "mysql_username"
MySQLPassword = "mysql_password"
MySQLDatabase = "mysql_database"
SQLiteFile = "sqlite_file"
PostGreSQLHOST = "postgresql_host"
PostGreSQLPort = "postgresql_port"
PostGreSQLUsername = "postgresql_username"
PostGreSQLPassword = "postgresql_password"
PostGreSQLDatabase = "postgresql_database"
PostGreSQLSSLMode = "postgresql_sslmode"
SelfRegistration = "self_registration"
UIURL = "ui_url"
JobServiceURL = "jobservice_url"

View File

@ -47,7 +47,7 @@ func InitClairDB(clairDB *models.PostGreSQL) error {
//Except for the password, other information is not configurable, so keep it hard-coded for 1.2.0.
p := &pgsql{
host: clairDB.Host,
port: clairDB.Port,
port: strconv.Itoa(clairDB.Port),
usr: clairDB.Username,
pwd: clairDB.Password,
database: clairDB.Database,
@ -87,14 +87,13 @@ func InitDatabase(database *models.Database) error {
func getDatabase(database *models.Database) (db Database, err error) {
switch database.Type {
case "", "mysql":
db = NewMySQL(database.MySQL.Host,
strconv.Itoa(database.MySQL.Port),
database.MySQL.Username,
database.MySQL.Password,
database.MySQL.Database)
case "sqlite":
db = NewSQLite(database.SQLite.File)
case "", "postgresql":
db = NewPQSQL(database.PostGreSQL.Host,
strconv.Itoa(database.PostGreSQL.Port),
database.PostGreSQL.Username,
database.PostGreSQL.Password,
database.PostGreSQL.Database,
false)
default:
err = fmt.Errorf("invalid database: %s", database.Type)
}

View File

@ -51,7 +51,7 @@ func cleanByUser(username string) {
from project_member
where entity_id = (
select user_id
from user
from harbor_user
where username = ?
) `, username)
if err != nil {
@ -98,7 +98,7 @@ func cleanByUser(username string) {
log.Error(err)
}
err = execUpdate(o, `delete from user where username = ?`, username)
err = execUpdate(o, `delete from harbor_user where username = ?`, username)
if err != nil {
o.Rollback()
log.Error(err)
@ -134,16 +134,13 @@ const publicityOn = 1
const publicityOff = 0
func TestMain(m *testing.M) {
databases := []string{"mysql", "sqlite"}
databases := []string{"postgresql"}
for _, database := range databases {
log.Infof("run test cases for database: %s", database)
result := 1
switch database {
case "mysql":
PrepareTestForMySQL()
case "sqlite":
PrepareTestForSQLite()
case "postgresql":
PrepareTestForPostgresSQL()
default:
log.Fatalf("invalid database: %s", database)
}
@ -167,7 +164,7 @@ func clearAll() {
tables := []string{"project_member",
"project_metadata", "access_log", "repository", "replication_policy",
"replication_target", "replication_job", "replication_immediate_trigger", "img_scan_job",
"img_scan_overview", "clair_vuln_timestamp", "project", "user"}
"img_scan_overview", "clair_vuln_timestamp", "project", "harbor_user"}
for _, t := range tables {
if err := ClearTable(t); err != nil {
log.Errorf("Failed to clear table: %s,error: %v", t, err)
@ -693,7 +690,7 @@ func TestGetRoleByID(t *testing.T) {
}
func TestToggleAdminRole(t *testing.T) {
err := ToggleUserAdminRole(currentUser.UserID, 1)
err := ToggleUserAdminRole(currentUser.UserID, true)
if err != nil {
t.Errorf("Error in toggle ToggleUserAdmin role: %v, user: %+v", err, currentUser)
}
@ -704,7 +701,7 @@ func TestToggleAdminRole(t *testing.T) {
if !isAdmin {
t.Errorf("User is not admin after toggled, user id: %d", currentUser.UserID)
}
err = ToggleUserAdminRole(currentUser.UserID, 0)
err = ToggleUserAdminRole(currentUser.UserID, false)
if err != nil {
t.Errorf("Error in toggle ToggleUserAdmin role: %v, user: %+v", err, currentUser)
}
@ -1195,7 +1192,7 @@ func TestDeleteRepPolicy(t *testing.T) {
if err != nil && err != orm.ErrNoRows {
t.Errorf("Error occurred in GetRepPolicy:%v", err)
}
if p != nil && p.Deleted != 1 {
if p != nil && !p.Deleted {
t.Errorf("Able to find rep policy after deletion, id: %d", policyID)
}
}

View File

@ -15,6 +15,8 @@
package group
import (
"time"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
@ -23,11 +25,17 @@ import (
// AddUserGroup - Add User Group
func AddUserGroup(userGroup models.UserGroup) (int, error) {
o := dao.GetOrmer()
id, err := o.Insert(&userGroup)
sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?) RETURNING id"
var id int
now := time.Now()
err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, userGroup.LdapGroupDN, now, now).QueryRow(&id)
if err != nil {
return 0, err
}
return int(id), err
return id, nil
}
// QueryUserGroup - Query User Group
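
Note: the switch from o.Insert()/LastInsertId() to a raw insert with "RETURNING id" is the recurring pattern in these dao changes, because the lib/pq driver does not implement LastInsertId. A minimal standalone sketch of the same pattern against the user_group table defined in registry.sql above (connection parameters are illustrative, not from this commit):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // Postgres driver added to the Travis deps above
)

func main() {
	// Assumption: a local "registry" database reachable with these credentials.
	db, err := sql.Open("postgres",
		"host=127.0.0.1 port=5432 user=postgres password=root123 dbname=registry sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Exec + LastInsertId (the MySQL path) would fail with lib/pq; instead the
	// new key is returned by the statement itself and scanned from the row.
	var id int
	err = db.QueryRow(
		`insert into user_group (group_name, group_type, ldap_group_dn)
		 values ($1, $2, $3) RETURNING id`,
		"example_group", 1, "CN=example,DC=harbor,DC=com",
	).Scan(&id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new user_group id:", id)
}

(beego's Raw(), as used in the dao code, keeps ? placeholders and converts them for the registered driver; plain database/sql with pq takes $1-style placeholders as shown.)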

View File

@ -30,33 +30,31 @@ var createdUserGroupID int
func TestMain(m *testing.M) {
//databases := []string{"mysql", "sqlite"}
databases := []string{"mysql"}
databases := []string{"postgresql"}
for _, database := range databases {
log.Infof("run test cases for database: %s", database)
result := 1
switch database {
case "mysql":
dao.PrepareTestForMySQL()
case "sqlite":
dao.PrepareTestForSQLite()
case "postgresql":
dao.PrepareTestForPostgresSQL()
default:
log.Fatalf("invalid database: %s", database)
}
//Extract to test utils
initSqls := []string{
"insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into project (name, owner_id) values ('member_test_01', 1)",
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
"update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
}
clearSqls := []string{
"delete from project where name='member_test_01'",
"delete from user where username='member_test_01' or username='pm_sample'",
"delete from harbor_user where username='member_test_01' or username='pm_sample'",
"delete from user_group",
"delete from project_member",
}

View File

@ -24,7 +24,7 @@ import (
type pgsql struct {
host string
port int
port string
usr string
pwd string
database string
@ -47,13 +47,25 @@ func (p *pgsql) Name() string {
// String ...
func (p *pgsql) String() string {
return fmt.Sprintf("type-%s host-%s port-%d databse-%s sslmode-%q",
return fmt.Sprintf("type-%s host-%s port-%s databse-%s sslmode-%q",
p.Name(), p.host, p.port, p.database, pgsqlSSLMode(p.sslmode))
}
// NewPQSQL returns an instance of postgres
func NewPQSQL(host string, port string, usr string, pwd string, database string, sslmode bool) Database {
return &pgsql{
host: host,
port: port,
usr: usr,
pwd: pwd,
database: database,
sslmode: sslmode,
}
}
//Register registers pgSQL to orm with the info wrapped by the instance.
func (p *pgsql) Register(alias ...string) error {
if err := utils.TestTCPConn(fmt.Sprintf("%s:%d", p.host, p.port), 60, 2); err != nil {
if err := utils.TestTCPConn(fmt.Sprintf("%s:%s", p.host, p.port), 60, 2); err != nil {
return err
}
@ -65,7 +77,7 @@ func (p *pgsql) Register(alias ...string) error {
if len(alias) != 0 {
an = alias[0]
}
info := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
p.host, p.port, p.usr, p.pwd, p.database, pgsqlSSLMode(p.sslmode))
return orm.RegisterDataBase(an, "postgres", info)

View File

@ -27,7 +27,7 @@ func AddProjectMetadata(meta *models.ProjectMetadata) error {
now := time.Now()
sql := `insert into project_metadata
(project_id, name, value, creation_time, update_time, deleted)
values (?, ?, ?, ?, ?, 0)`
values (?, ?, ?, ?, ?, false)`
_, err := GetOrmer().Raw(sql, meta.ProjectID, meta.Name, meta.Value,
now, now).Exec()
return err
@ -39,7 +39,7 @@ func AddProjectMetadata(meta *models.ProjectMetadata) error {
func DeleteProjectMetadata(projectID int64, name ...string) error {
params := make([]interface{}, 1)
sql := `update project_metadata
set deleted = 1
set deleted = true
where project_id = ?`
params = append(params, projectID)
@ -56,7 +56,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error {
func UpdateProjectMetadata(meta *models.ProjectMetadata) error {
sql := `update project_metadata
set value = ?, update_time = ?
where project_id = ? and name = ? and deleted = 0`
where project_id = ? and name = ? and deleted = false`
_, err := GetOrmer().Raw(sql, meta.Value, time.Now(), meta.ProjectID,
meta.Name).Exec()
return err
@ -70,7 +70,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
params := make([]interface{}, 1)
sql := `select * from project_metadata
where project_id = ? and deleted = 0`
where project_id = ? and deleted = false`
params = append(params, projectID)
if len(name) > 0 {
@ -93,7 +93,7 @@ func paramPlaceholder(n int) string {
// ListProjectMetadata ...
func ListProjectMetadata(name, value string) ([]*models.ProjectMetadata, error) {
sql := `select * from project_metadata
where name = ? and value = ? and deleted = 0`
where name = ? and value = ? and deleted = false`
metadatas := []*models.ProjectMetadata{}
_, err := GetOrmer().Raw(sql, name, value).QueryRows(&metadatas)
return metadatas, err

View File

@ -25,20 +25,13 @@ import (
// AddProject adds a project to the database along with project roles information and access log records.
func AddProject(project models.Project) (int64, error) {
o := GetOrmer()
p, err := o.Raw("insert into project (owner_id, name, creation_time, update_time, deleted) values (?, ?, ?, ?, ?)").Prepare()
if err != nil {
return 0, err
}
sql := "insert into project (owner_id, name, creation_time, update_time, deleted) values (?, ?, ?, ?, ?) RETURNING project_id"
var projectID int64
now := time.Now()
r, err := p.Exec(project.OwnerID, project.Name, now, now, project.Deleted)
if err != nil {
return 0, err
}
projectID, err := r.LastInsertId()
err := o.Raw(sql, project.OwnerID, project.Name, now, now, project.Deleted).QueryRow(&projectID)
if err != nil {
return 0, err
}
@ -53,10 +46,10 @@ func AddProject(project models.Project) (int64, error) {
return 0, err
}
if pmID == 0 {
return projectID, fmt.Errorf("Failed to add project member, pmid=0")
}
return projectID, err
}
return projectID, nil
}
func addProjectMember(member models.Member) (int, error) {
@ -72,16 +65,13 @@ func addProjectMember(member models.Member) (int, error) {
return 0, fmt.Errorf("Invalid project_id, member: %+v", member)
}
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?)"
r, err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).Exec()
var pmID int
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?) RETURNING id"
err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).QueryRow(&pmID)
if err != nil {
return 0, err
}
pmid, err := r.LastInsertId()
if err != nil {
return 0, err
}
return int(pmid), err
return pmID, err
}
// GetProjectByID ...
@ -89,7 +79,7 @@ func GetProjectByID(id int64) (*models.Project, error) {
o := GetOrmer()
sql := `select p.project_id, p.name, u.username as owner_name, p.owner_id, p.creation_time, p.update_time
from project p left join user u on p.owner_id = u.user_id where p.deleted = 0 and p.project_id = ?`
from project p left join harbor_user u on p.owner_id = u.user_id where p.deleted = false and p.project_id = ?`
queryParam := make([]interface{}, 1)
queryParam = append(queryParam, id)
@ -111,7 +101,7 @@ func GetProjectByID(id int64) (*models.Project, error) {
func GetProjectByName(name string) (*models.Project, error) {
o := GetOrmer()
var p []models.Project
n, err := o.Raw(`select * from project where name = ? and deleted = 0`, name).QueryRows(&p)
n, err := o.Raw(`select * from project where name = ? and deleted = false`, name).QueryRows(&p)
if err != nil {
return nil, err
}
@ -146,10 +136,12 @@ func GetTotalOfProjects(query *models.ProjectQueryParam) (int64, error) {
// GetProjects returns a project list according to the query conditions
func GetProjects(query *models.ProjectQueryParam) ([]*models.Project, error) {
sql, params := projectQueryConditions(query)
if query == nil {
sql += ` order by p.name`
}
sql = `select distinct p.project_id, p.name, p.owner_id,
p.creation_time, p.update_time ` + sql
var projects []*models.Project
_, err := GetOrmer().Raw(sql, params).QueryRows(&projects)
return projects, err
@ -161,7 +153,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
sql := ` from project as p`
if query == nil {
sql += ` where p.deleted=0 order by p.name`
sql += ` where p.deleted=false`
return sql, params
}
@ -172,17 +164,17 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
}
if len(query.Owner) != 0 {
sql += ` join user u1
sql += ` join harbor_user u1
on p.owner_id = u1.user_id`
}
if query.Member != nil && len(query.Member.Name) != 0 {
sql += ` join project_member pm
on p.project_id = pm.project_id
join user u2
join harbor_user u2
on pm.entity_id=u2.user_id`
}
sql += ` where p.deleted=0`
sql += ` where p.deleted=false`
if len(query.Owner) != 0 {
sql += ` and u1.username=?`
@ -220,10 +212,8 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
params = append(params, query.ProjectIDs)
}
sql += ` order by p.name`
if query.Pagination != nil && query.Pagination.Size > 0 {
sql += ` limit ?`
sql += ` order by p.name limit ?`
params = append(params, query.Pagination.Size)
if query.Pagination.Page > 0 {
@ -245,7 +235,7 @@ func DeleteProject(id int64) error {
name := fmt.Sprintf("%s#%d", project.Name, project.ProjectID)
sql := `update project
set deleted = 1, name = ?
set deleted = true, name = ?
where project_id = ?`
_, err = GetOrmer().Raw(sql, name, id).Exec()
return err

View File

@ -35,9 +35,9 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) {
on pm.project_id = ? and ug.id = pm.entity_id join role r on pm.role = r.role_id where pm.entity_type = 'g')
union
(select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename,
r.role_id as role, pm.entity_type as entity_type from user u join project_member pm
r.role_id as role, pm.entity_type as entity_type from harbor_user u join project_member pm
on pm.project_id = ? and u.user_id = pm.entity_id
join role r on pm.role = r.role_id where u.deleted = 0 and pm.entity_type = 'u')) as a where a.project_id = ? `
join role r on pm.role = r.role_id where u.deleted = false and pm.entity_type = 'u')) as a where a.project_id = ? `
queryParam := make([]interface{}, 1)
// used ProjectID already
@ -89,16 +89,14 @@ func AddProjectMember(member models.Member) (int, error) {
if err != nil {
return 0, err
}
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?)"
r, err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).Exec()
var pmid int
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?) RETURNING id"
err = o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).QueryRow(&pmid)
if err != nil {
return 0, err
}
pmid, err := r.LastInsertId()
if err != nil {
return 0, err
}
return int(pmid), err
return pmid, err
}
// UpdateProjectMemberRole updates the record in table project_member, only role can be changed
@ -127,9 +125,9 @@ func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, e
r.name as rolename,
pm.role, pm.entity_id, pm.entity_type
from project_member pm
left join user u on pm.entity_id = u.user_id and pm.entity_type = 'u'
left join harbor_user u on pm.entity_id = u.user_id and pm.entity_type = 'u'
left join role r on pm.role = r.role_id
where u.deleted = 0 and pm.project_id = ? and u.username like ? order by entity_name )
where u.deleted = false and pm.project_id = ? and u.username like ? order by entity_name )
union
(select pm.id, pm.project_id,
ug.group_name as entity_name,

View File

@ -31,33 +31,31 @@ import (
func TestMain(m *testing.M) {
//databases := []string{"mysql", "sqlite"}
databases := []string{"mysql"}
databases := []string{"postgresql"}
for _, database := range databases {
log.Infof("run test cases for database: %s", database)
result := 1
switch database {
case "mysql":
dao.PrepareTestForMySQL()
case "sqlite":
dao.PrepareTestForSQLite()
case "postgresql":
dao.PrepareTestForPostgresSQL()
default:
log.Fatalf("invalid database: %s", database)
}
//Extract to test utils
initSqls := []string{
"insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into project (name, owner_id) values ('member_test_01', 1)",
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
"update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
}
clearSqls := []string{
"delete from project where name='member_test_01'",
"delete from user where username='member_test_01' or username='pm_sample'",
"delete from harbor_user where username='member_test_01' or username='pm_sample'",
"delete from user_group",
"delete from project_member",
}

View File

@ -49,8 +49,8 @@ func TestDeleteProject(t *testing.T) {
t.Fatalf("failed to get project: %v", err)
}
if p.Deleted != 1 {
t.Errorf("unexpeced deleted column: %d != %d", p.Deleted, 1)
if !p.Deleted {
t.Errorf("unexpeced deleted column: %t != %t", p.Deleted, true)
}
deletedName := fmt.Sprintf("%s#%d", name, id)
@ -96,16 +96,16 @@ func Test_projectQueryConditions(t *testing.T) {
[]interface{}{}},
{"Query with valid projectID",
args{query: &models.ProjectQueryParam{ProjectIDs: []int64{2, 3}, Owner: "admin"}},
` from project as p join user u1
on p.owner_id = u1.user_id where p.deleted=0 and u1.username=? and p.project_id in ( ?,? ) order by p.name`,
` from project as p join harbor_user u1
on p.owner_id = u1.user_id where p.deleted=false and u1.username=? and p.project_id in ( ?,? )`,
[]interface{}{2, 3}},
{"Query with valid page and member",
args{query: &models.ProjectQueryParam{ProjectIDs: []int64{2, 3}, Owner: "admin", Name: "sample", Member: &models.MemberQuery{Name: "name", Role: 1}, Pagination: &models.Pagination{Page: 1, Size: 20}}},
` from project as p join user u1
` from project as p join harbor_user u1
on p.owner_id = u1.user_id join project_member pm
on p.project_id = pm.project_id
join user u2
on pm.entity_id=u2.user_id where p.deleted=0 and u1.username=? and p.name like ? and u2.username=? and pm.role = ? and p.project_id in ( ?,? ) order by p.name limit ? offset ?`,
join harbor_user u2
on pm.entity_id=u2.user_id where p.deleted=false and u1.username=? and p.name like ? and u2.username=? and pm.role = ? and p.project_id in ( ?,? ) order by p.name limit ? offset ?`,
[]interface{}{1, []int64{2, 3}, 20, 0}},
}
for _, tt := range tests {

View File

@ -24,27 +24,21 @@ import (
// Register is used for user to register, the password is encrypted before the record is inserted into database.
func Register(user models.User) (int64, error) {
o := GetOrmer()
p, err := o.Raw("insert into user (username, password, realname, email, comment, salt, sysadmin_flag, creation_time, update_time) values (?, ?, ?, ?, ?, ?, ?, ?, ?)").Prepare()
if err != nil {
return 0, err
}
defer p.Close()
salt := utils.GenerateRandomString()
now := time.Now()
r, err := p.Exec(user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email, user.Comment, salt, user.HasAdminRole, now, now)
salt := utils.GenerateRandomString()
sql := `insert into harbor_user
(username, password, realname, email, comment, salt, sysadmin_flag, creation_time, update_time)
values (?, ?, ?, ?, ?, ?, ?, ?, ?) RETURNING user_id`
var userID int64
err := o.Raw(sql, user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email,
user.Comment, salt, user.HasAdminRole, now, now).QueryRow(&userID)
if err != nil {
return 0, err
}
userID, err := r.LastInsertId()
if err != nil {
return 0, err
}
return userID, nil
}
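
The rewritten Register above is the template for every insert in this change that used to rely on MySQL's LastInsertId(): the statement gains a RETURNING clause and the generated key is read back with QueryRow, and the table moves from user to harbor_user because user is a reserved word in PostgreSQL. A minimal sketch of the same pattern, assuming beego's orm package and a made-up widget table (neither the table nor the addWidget helper is part of Harbor):

package example

import "github.com/astaxie/beego/orm"

// addWidget inserts a row and reads the generated primary key back via
// RETURNING, the PostgreSQL replacement for result.LastInsertId().
// The widget table is a hypothetical example, not part of the Harbor schema.
func addWidget(o orm.Ormer, name string) (int64, error) {
	var id int64
	sql := `insert into widget (name) values (?) RETURNING id`
	if err := o.Raw(sql, name).QueryRow(&id); err != nil {
		return 0, err
	}
	return id, nil
}

AddRepTarget, AddRepPolicy, and the WatchItem DAO further down follow the same shape.
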
// UserExists returns whether a user exists according to username or Email.
@ -56,7 +50,7 @@ func UserExists(user models.User, target string) (bool, error) {
o := GetOrmer()
sql := `select user_id from user where 1=1 `
sql := `select user_id from harbor_user where 1=1 `
queryParam := make([]interface{}, 1)
switch target {

View File

@ -27,7 +27,15 @@ import (
// AddRepTarget ...
func AddRepTarget(target models.RepTarget) (int64, error) {
o := GetOrmer()
return o.Insert(&target)
sql := "insert into replication_target (name, url, username, password, insecure, target_type) values (?, ?, ?, ?, ?, ?) RETURNING id"
var targetID int64
err := o.Raw(sql, target.Name, target.URL, target.Username, target.Password, target.Insecure, target.Type).QueryRow(&targetID)
if err != nil {
return 0, err
}
return targetID, nil
}
// GetRepTarget ...
@ -75,8 +83,13 @@ func DeleteRepTarget(id int64) error {
// UpdateRepTarget ...
func UpdateRepTarget(target models.RepTarget) error {
o := GetOrmer()
target.UpdateTime = time.Now()
_, err := o.Update(&target, "URL", "Name", "Username", "Password", "Insecure", "UpdateTime")
sql := `update replication_target
set url = ?, name = ?, username = ?, password = ?, insecure = ?, update_time = ?
where id = ?`
_, err := o.Raw(sql, target.URL, target.Name, target.Username, target.Password, target.Insecure, time.Now(), target.ID).Exec()
return err
}
@ -89,7 +102,7 @@ func FilterRepTargets(name string) ([]*models.RepTarget, error) {
sql := `select * from replication_target `
if len(name) != 0 {
sql += `where name like ? `
args = append(args, `%`+Escape(name)+`%`)
args = append(args, "%"+Escape(name)+"%")
}
sql += `order by creation_time`
@ -106,25 +119,27 @@ func FilterRepTargets(name string) ([]*models.RepTarget, error) {
func AddRepPolicy(policy models.RepPolicy) (int64, error) {
o := GetOrmer()
sql := `insert into replication_policy (name, project_id, target_id, enabled, description, cron_str, creation_time, update_time, filters, replicate_deletion)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) RETURNING id`
params := []interface{}{}
now := time.Now()
params = append(params, policy.Name, policy.ProjectID, policy.TargetID, 1,
params = append(params, policy.Name, policy.ProjectID, policy.TargetID, true,
policy.Description, policy.Trigger, now, now, policy.Filters,
policy.ReplicateDeletion)
result, err := o.Raw(sql, params...).Exec()
var policyID int64
err := o.Raw(sql, params...).QueryRow(&policyID)
if err != nil {
return 0, err
}
return result.LastInsertId()
return policyID, nil
}
// GetRepPolicy ...
func GetRepPolicy(id int64) (*models.RepPolicy, error) {
o := GetOrmer()
sql := `select * from replication_policy where id = ? and deleted = 0`
sql := `select * from replication_policy where id = ? and deleted = false`
var policy models.RepPolicy
@ -140,7 +155,7 @@ func GetRepPolicy(id int64) (*models.RepPolicy, error) {
// GetTotalOfRepPolicies returns the total count of replication policies
func GetTotalOfRepPolicies(name string, projectID int64) (int64, error) {
qs := GetOrmer().QueryTable(&models.RepPolicy{}).Filter("deleted", 0)
qs := GetOrmer().QueryTable(&models.RepPolicy{}).Filter("deleted", false)
if len(name) != 0 {
qs = qs.Filter("name__icontains", name)
@ -166,23 +181,23 @@ func FilterRepPolicies(name string, projectID, page, pageSize int64) ([]*models.
count(rj.status) as error_job_count
from replication_policy rp
left join replication_target rt on rp.target_id=rt.id
left join replication_job rj on rp.id=rj.policy_id and (rj.status="error"
or rj.status="retrying")
where rp.deleted = 0 `
left join replication_job rj on rp.id=rj.policy_id and (rj.status='error'
or rj.status='retrying')
where rp.deleted = false `
if len(name) != 0 && projectID != 0 {
sql += `and rp.name like ? and rp.project_id = ? `
args = append(args, `%`+Escape(name)+`%`)
args = append(args, "%"+Escape(name)+"%")
args = append(args, projectID)
} else if len(name) != 0 {
sql += `and rp.name like ? `
args = append(args, `%`+Escape(name)+`%`)
args = append(args, "%"+Escape(name)+"%")
} else if projectID != 0 {
sql += `and rp.project_id = ? `
args = append(args, projectID)
}
sql += `group by rp.id order by rp.creation_time`
sql += `group by rt.name, rp.id order by rp.creation_time`
if page > 0 && pageSize > 0 {
sql += ` limit ? offset ?`
@ -200,7 +215,7 @@ func FilterRepPolicies(name string, projectID, page, pageSize int64) ([]*models.
// GetRepPolicyByName ...
func GetRepPolicyByName(name string) (*models.RepPolicy, error) {
o := GetOrmer()
sql := `select * from replication_policy where deleted = 0 and name = ?`
sql := `select * from replication_policy where deleted = false and name = ?`
var policy models.RepPolicy
@ -217,7 +232,7 @@ func GetRepPolicyByName(name string) (*models.RepPolicy, error) {
// GetRepPolicyByProject ...
func GetRepPolicyByProject(projectID int64) ([]*models.RepPolicy, error) {
o := GetOrmer()
sql := `select * from replication_policy where deleted = 0 and project_id = ?`
sql := `select * from replication_policy where deleted = false and project_id = ?`
var policies []*models.RepPolicy
@ -231,7 +246,7 @@ func GetRepPolicyByProject(projectID int64) ([]*models.RepPolicy, error) {
// GetRepPolicyByTarget ...
func GetRepPolicyByTarget(targetID int64) ([]*models.RepPolicy, error) {
o := GetOrmer()
sql := `select * from replication_policy where deleted = 0 and target_id = ?`
sql := `select * from replication_policy where deleted = false and target_id = ?`
var policies []*models.RepPolicy
@ -245,7 +260,7 @@ func GetRepPolicyByTarget(targetID int64) ([]*models.RepPolicy, error) {
// GetRepPolicyByProjectAndTarget ...
func GetRepPolicyByProjectAndTarget(projectID, targetID int64) ([]*models.RepPolicy, error) {
o := GetOrmer()
sql := `select * from replication_policy where deleted = 0 and project_id = ? and target_id = ?`
sql := `select * from replication_policy where deleted = false and project_id = ? and target_id = ?`
var policies []*models.RepPolicy
@ -259,9 +274,13 @@ func GetRepPolicyByProjectAndTarget(projectID, targetID int64) ([]*models.RepPol
// UpdateRepPolicy ...
func UpdateRepPolicy(policy *models.RepPolicy) error {
o := GetOrmer()
policy.UpdateTime = time.Now()
_, err := o.Update(policy, "ProjectID", "TargetID", "Name", "Description",
"Trigger", "Filters", "ReplicateDeletion", "UpdateTime")
sql := `update replication_policy
set project_id = ?, target_id = ?, name = ?, description = ?, cron_str = ?, filters = ?, replicate_deletion = ?, update_time = ?
where id = ?`
_, err := o.Raw(sql, policy.ProjectID, policy.TargetID, policy.Name, policy.Description, policy.Trigger, policy.Filters, policy.ReplicateDeletion, time.Now(), policy.ID).Exec()
return err
}
@ -270,7 +289,7 @@ func DeleteRepPolicy(id int64) error {
o := GetOrmer()
policy := &models.RepPolicy{
ID: id,
Deleted: 1,
Deleted: true,
UpdateTime: time.Now(),
}
_, err := o.Update(policy, "Deleted")

View File

@ -70,7 +70,7 @@ func IsAdminRole(userIDOrUsername interface{}) (bool, error) {
return false, nil
}
return user.HasAdminRole == 1, nil
return user.HasAdminRole, nil
}
// GetRoleByID ...

View File

@ -27,32 +27,40 @@ var defaultRegistered = false
// PrepareTestForMySQL is for test only.
func PrepareTestForMySQL() {
dbHost := os.Getenv("MYSQL_HOST")
}
// PrepareTestForSQLite is for test only.
func PrepareTestForSQLite() {
}
// PrepareTestForPostgresSQL is for test only.
func PrepareTestForPostgresSQL() {
dbHost := os.Getenv("POSTGRESQL_HOST")
if len(dbHost) == 0 {
log.Fatalf("environment variable MYSQL_HOST is not set")
log.Fatalf("environment variable POSTGRESQL_HOST is not set")
}
dbUser := os.Getenv("MYSQL_USR")
dbUser := os.Getenv("POSTGRESQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable MYSQL_USR is not set")
log.Fatalf("environment variable POSTGRESQL_USR is not set")
}
dbPortStr := os.Getenv("MYSQL_PORT")
dbPortStr := os.Getenv("POSTGRESQL_PORT")
if len(dbPortStr) == 0 {
log.Fatalf("environment variable MYSQL_PORT is not set")
log.Fatalf("environment variable POSTGRESQL_PORT is not set")
}
dbPort, err := strconv.Atoi(dbPortStr)
if err != nil {
log.Fatalf("invalid MYSQL_PORT: %v", err)
log.Fatalf("invalid POSTGRESQL_PORT: %v", err)
}
dbPassword := os.Getenv("MYSQL_PWD")
dbDatabase := os.Getenv("MYSQL_DATABASE")
dbPassword := os.Getenv("POSTGRESQL_PWD")
dbDatabase := os.Getenv("POSTGRESQL_DATABASE")
if len(dbDatabase) == 0 {
log.Fatalf("environment variable MYSQL_DATABASE is not set")
log.Fatalf("environment variable POSTGRESQL_DATABASE is not set")
}
database := &models.Database{
Type: "mysql",
MySQL: &models.MySQL{
Type: "postgresql",
PostGreSQL: &models.PostGreSQL{
Host: dbHost,
Port: dbPort,
Username: dbUser,
@ -61,23 +69,7 @@ func PrepareTestForMySQL() {
},
}
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
initDatabaseForTest(database)
}
// PrepareTestForSQLite is for test only.
func PrepareTestForSQLite() {
file := os.Getenv("SQLITE_FILE")
if len(file) == 0 {
log.Fatalf("environment variable SQLITE_FILE is not set")
}
database := &models.Database{
Type: "sqlite",
SQLite: &models.SQLite{
File: file,
},
}
log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
initDatabaseForTest(database)
}
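
With the helper above, a local DAO test run only needs the POSTGRESQL_* variables exported before the suite starts. A sketch of a TestMain wired up this way, assuming the helper is exported from Harbor's common dao package and that the placeholder values point at a reachable PostgreSQL instance:

package dao_test

import (
	"os"
	"testing"

	"github.com/vmware/harbor/src/common/dao"
)

func TestMain(m *testing.M) {
	// Placeholder connection values for a local test database; adjust as needed.
	os.Setenv("POSTGRESQL_HOST", "127.0.0.1")
	os.Setenv("POSTGRESQL_PORT", "5432")
	os.Setenv("POSTGRESQL_USR", "postgres")
	os.Setenv("POSTGRESQL_PWD", "root123")
	os.Setenv("POSTGRESQL_DATABASE", "registry")

	// Reads the variables above and initializes the ORM for the tests.
	dao.PrepareTestForPostgresSQL()
	os.Exit(m.Run())
}
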

View File

@ -34,8 +34,8 @@ func GetUser(query models.User) (*models.User, error) {
sql := `select user_id, username, email, realname, comment, reset_uuid, salt,
sysadmin_flag, creation_time, update_time
from user u
where deleted = 0 `
from harbor_user u
where deleted = false `
queryParam := make([]interface{}, 1)
if query.UserID != 0 {
sql += ` and user_id = ? `
@ -79,7 +79,7 @@ func LoginByDb(auth models.AuthModel) (*models.User, error) {
o := GetOrmer()
var users []models.User
n, err := o.Raw(`select * from user where (username = ? or email = ?) and deleted = 0`,
n, err := o.Raw(`select * from harbor_user where (username = ? or email = ?) and deleted = false`,
auth.Principal, auth.Principal).QueryRows(&users)
if err != nil {
return nil, err
@ -134,10 +134,10 @@ func userQueryConditions(query *models.UserQuery) orm.QuerySeter {
}
// ToggleUserAdminRole gives a user admin role.
func ToggleUserAdminRole(userID, hasAdmin int) error {
func ToggleUserAdminRole(userID int, hasAdmin bool) error {
o := GetOrmer()
queryParams := make([]interface{}, 1)
sql := `update user set sysadmin_flag = ? where user_id = ?`
sql := `update harbor_user set sysadmin_flag = ? where user_id = ?`
queryParams = append(queryParams, hasAdmin)
queryParams = append(queryParams, userID)
r, err := o.Raw(sql, queryParams).Exec()
@ -164,9 +164,9 @@ func ChangeUserPassword(u models.User, oldPassword ...string) (err error) {
salt := utils.GenerateRandomString()
if len(oldPassword) == 0 {
//In some cases, there is no need to check the old password, just as with Linux password change policies.
r, err = o.Raw(`update user set password=?, salt=? where user_id=?`, utils.Encrypt(u.Password, salt), salt, u.UserID).Exec()
r, err = o.Raw(`update harbor_user set password=?, salt=? where user_id=?`, utils.Encrypt(u.Password, salt), salt, u.UserID).Exec()
} else {
r, err = o.Raw(`update user set password=?, salt=? where user_id=? and password = ?`, utils.Encrypt(u.Password, salt), salt, u.UserID, utils.Encrypt(oldPassword[0], u.Salt)).Exec()
r, err = o.Raw(`update harbor_user set password=?, salt=? where user_id=? and password = ?`, utils.Encrypt(u.Password, salt), salt, u.UserID, utils.Encrypt(oldPassword[0], u.Salt)).Exec()
}
if err != nil {
@ -186,7 +186,7 @@ func ChangeUserPassword(u models.User, oldPassword ...string) (err error) {
// ResetUserPassword ...
func ResetUserPassword(u models.User) error {
o := GetOrmer()
r, err := o.Raw(`update user set password=?, reset_uuid=? where reset_uuid=?`, utils.Encrypt(u.Password, u.Salt), "", u.ResetUUID).Exec()
r, err := o.Raw(`update harbor_user set password=?, reset_uuid=? where reset_uuid=?`, utils.Encrypt(u.Password, u.Salt), "", u.ResetUUID).Exec()
if err != nil {
return err
}
@ -203,7 +203,7 @@ func ResetUserPassword(u models.User) error {
// UpdateUserResetUUID ...
func UpdateUserResetUUID(u models.User) error {
o := GetOrmer()
_, err := o.Raw(`update user set reset_uuid=? where email=?`, u.ResetUUID, u.Email).Exec()
_, err := o.Raw(`update harbor_user set reset_uuid=? where email=?`, u.ResetUUID, u.Email).Exec()
return err
}
@ -218,7 +218,7 @@ func CheckUserPassword(query models.User) (*models.User, error) {
return nil, nil
}
sql := `select user_id, username, salt from user where deleted = 0 and username = ? and password = ?`
sql := `select user_id, username, salt from harbor_user where deleted = false and username = ? and password = ?`
queryParam := make([]interface{}, 1)
queryParam = append(queryParam, currentUser.Username)
queryParam = append(queryParam, utils.Encrypt(query.Password, currentUser.Salt))
@ -251,8 +251,8 @@ func DeleteUser(userID int) error {
name := fmt.Sprintf("%s#%d", user.Username, user.UserID)
email := fmt.Sprintf("%s#%d", user.Email, user.UserID)
_, err = o.Raw(`update user
set deleted = 1, username = ?, email = ?
_, err = o.Raw(`update harbor_user
set deleted = true, username = ?, email = ?
where user_id = ?`, name, email, userID).Exec()
return err
}

View File

@ -50,13 +50,13 @@ func TestDeleteUser(t *testing.T) {
}
user := &models.User{}
sql := "select * from user where user_id = ?"
sql := "select * from harbor_user where user_id = ?"
if err = GetOrmer().Raw(sql, id).
QueryRow(user); err != nil {
t.Fatalf("failed to query user: %v", err)
}
if user.Deleted != 1 {
if user.Deleted != true {
t.Error("user is not deleted")
}

View File

@ -35,10 +35,19 @@ type DatabaseWatchItemDAO struct{}
// Add a WatchItem
func (d *DatabaseWatchItemDAO) Add(item *models.WatchItem) (int64, error) {
o := GetOrmer()
var triggerID int64
now := time.Now()
item.CreationTime = now
item.UpdateTime = now
return GetOrmer().Insert(item)
sql := "insert into replication_immediate_trigger (policy_id, namespace, on_deletion, on_push, creation_time, update_time) values (?, ?, ?, ?, ?, ?) RETURNING id"
err := o.Raw(sql, item.PolicyID, item.Namespace, item.OnDeletion, item.OnPush, now, now).QueryRow(&triggerID)
if err != nil {
return 0, err
}
return triggerID, nil
}
// DeleteByPolicyID deletes the WatchItem specified by policy ID
@ -51,9 +60,9 @@ func (d *DatabaseWatchItemDAO) DeleteByPolicyID(policyID int64) error {
func (d *DatabaseWatchItemDAO) Get(namespace, operation string) ([]models.WatchItem, error) {
qs := GetOrmer().QueryTable(&models.WatchItem{}).Filter("Namespace", namespace)
if operation == "push" {
qs = qs.Filter("OnPush", 1)
qs = qs.Filter("OnPush", true)
} else if operation == "delete" {
qs = qs.Filter("OnDeletion", 1)
qs = qs.Filter("OnDeletion", true)
}
items := []models.WatchItem{}

View File

@ -25,7 +25,7 @@ type AccessLog struct {
ProjectID int64 `orm:"column(project_id)" json:"project_id"`
RepoName string `orm:"column(repo_name)" json:"repo_name"`
RepoTag string `orm:"column(repo_tag)" json:"repo_tag"`
GUID string `orm:"column(GUID)" json:"guid"`
GUID string `orm:"column(guid)" json:"guid"`
Operation string `orm:"column(operation)" json:"operation"`
OpTime time.Time `orm:"column(op_time)" json:"op_time"`
}

View File

@ -26,8 +26,7 @@ type Authentication struct {
// Database ...
type Database struct {
Type string `json:"type"`
MySQL *MySQL `json:"mysql,omitempty"`
SQLite *SQLite `json:"sqlite,omitempty"`
PostGreSQL *PostGreSQL `json:"postgresql,omitempty"`
}
// MySQL ...

View File

@ -40,5 +40,5 @@ type ProjectMetadata struct {
Value string `orm:"column(value)" json:"value"`
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
UpdateTime time.Time `orm:"column(update_time)" json:"update_time"`
Deleted int `orm:"column(deleted)" json:"deleted"`
Deleted bool `orm:"column(deleted)" json:"deleted"`
}

View File

@ -29,7 +29,7 @@ type Project struct {
Name string `orm:"column(name)" json:"name"`
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
UpdateTime time.Time `orm:"column(update_time)" json:"update_time"`
Deleted int `orm:"column(deleted)" json:"deleted"`
Deleted bool `orm:"column(deleted)" json:"deleted"`
OwnerName string `orm:"-" json:"owner_name"`
Togglable bool `orm:"-" json:"togglable"`
Role int `orm:"-" json:"current_user_role_id"`

View File

@ -48,7 +48,7 @@ type RepPolicy struct {
ReplicateDeletion bool `orm:"column(replicate_deletion)"`
CreationTime time.Time `orm:"column(creation_time);auto_now_add"`
UpdateTime time.Time `orm:"column(update_time);auto_now"`
Deleted int `orm:"column(deleted)"`
Deleted bool `orm:"column(deleted)"`
}
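
Since deleted and sysadmin_flag are boolean columns in the PostgreSQL schema, the model fields change from int to bool and callers drop the old 0/1 comparisons. A short sketch of the query side, reusing the GetOrmer helper and RepPolicy model that appear elsewhere in this diff (the countActivePolicies function itself is hypothetical):

package example

import (
	"github.com/vmware/harbor/src/common/dao"
	"github.com/vmware/harbor/src/common/models"
)

// countActivePolicies returns the number of replication policies that are
// not soft-deleted; the filter takes a Go bool that maps onto the
// PostgreSQL boolean column.
func countActivePolicies() (int64, error) {
	return dao.GetOrmer().
		QueryTable(&models.RepPolicy{}).
		Filter("deleted", false).
		Count()
}
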
// RepJob is the model for a replication job, which is the execution unit on job service, currently it is used to transfer/remove

View File

@ -19,7 +19,7 @@ import (
)
// UserTable is the name of table in DB that holds the user object
const UserTable = "user"
const UserTable = "harbor_user"
// User holds the details of a user.
type User struct {
@ -29,13 +29,13 @@ type User struct {
Password string `orm:"column(password)" json:"password"`
Realname string `orm:"column(realname)" json:"realname"`
Comment string `orm:"column(comment)" json:"comment"`
Deleted int `orm:"column(deleted)" json:"deleted"`
Deleted bool `orm:"column(deleted)" json:"deleted"`
Rolename string `orm:"-" json:"role_name"`
//if this field is named as "RoleID", beego orm can not map role_id
//to it.
Role int `orm:"-" json:"role_id"`
// RoleList []Role `json:"role_list"`
HasAdminRole int `orm:"column(sysadmin_flag)" json:"has_admin_role"`
HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"`
ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"`
Salt string `orm:"column(salt)" json:"-"`
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`

View File

@ -56,7 +56,7 @@ func (s *SecurityContext) IsSysAdmin() bool {
if !s.IsAuthenticated() {
return false
}
return s.user.HasAdminRole == 1
return s.user.HasAdminRole
}
// IsSolutionUser ...

View File

@ -53,32 +53,32 @@ var (
)
func TestMain(m *testing.M) {
dbHost := os.Getenv("MYSQL_HOST")
dbHost := os.Getenv("POSTGRESQL_HOST")
if len(dbHost) == 0 {
log.Fatalf("environment variable MYSQL_HOST is not set")
log.Fatalf("environment variable POSTGRES_HOST is not set")
}
dbPortStr := os.Getenv("MYSQL_PORT")
dbUser := os.Getenv("POSTGRESQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable POSTGRES_USR is not set")
}
dbPortStr := os.Getenv("POSTGRESQL_PORT")
if len(dbPortStr) == 0 {
log.Fatalf("environment variable MYSQL_PORT is not set")
log.Fatalf("environment variable POSTGRES_PORT is not set")
}
dbPort, err := strconv.Atoi(dbPortStr)
if err != nil {
log.Fatalf("invalid MYSQL_PORT: %v", err)
}
dbUser := os.Getenv("MYSQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable MYSQL_USR is not set")
log.Fatalf("invalid POSTGRESQL_PORT: %v", err)
}
dbPassword := os.Getenv("MYSQL_PWD")
dbDatabase := os.Getenv("MYSQL_DATABASE")
dbPassword := os.Getenv("POSTGRESQL_PWD")
dbDatabase := os.Getenv("POSTGRESQL_DATABASE")
if len(dbDatabase) == 0 {
log.Fatalf("environment variable MYSQL_DATABASE is not set")
log.Fatalf("environment variable POSTGRESQL_DATABASE is not set")
}
database := &models.Database{
Type: "mysql",
MySQL: &models.MySQL{
Type: "postgresql",
PostGreSQL: &models.PostGreSQL{
Host: dbHost,
Port: dbPort,
Username: dbUser,
@ -87,7 +87,7 @@ func TestMain(m *testing.M) {
},
}
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
if err := dao.InitDatabase(database); err != nil {
log.Fatalf("failed to initialize database: %v", err)
@ -197,7 +197,7 @@ func TestIsSysAdmin(t *testing.T) {
// authenticated, admin
ctx = NewSecurityContext(&models.User{
Username: "test",
HasAdminRole: 1,
HasAdminRole: true,
}, nil)
assert.True(t, ctx.IsSysAdmin())
}
@ -229,7 +229,7 @@ func TestHasReadPerm(t *testing.T) {
// private project, authenticated, system admin
ctx = NewSecurityContext(&models.User{
Username: "admin",
HasAdminRole: 1,
HasAdminRole: true,
}, pm)
assert.True(t, ctx.HasReadPerm(private.Name))
}
@ -250,7 +250,7 @@ func TestHasWritePerm(t *testing.T) {
// authenticated, system admin
ctx = NewSecurityContext(&models.User{
Username: "admin",
HasAdminRole: 1,
HasAdminRole: true,
}, pm)
assert.True(t, ctx.HasReadPerm(private.Name))
}
@ -267,7 +267,7 @@ func TestHasAllPerm(t *testing.T) {
// authenticated, system admin
ctx = NewSecurityContext(&models.User{
Username: "admin",
HasAdminRole: 1,
HasAdminRole: true,
}, pm)
assert.True(t, ctx.HasAllPerm(private.Name))
}

View File

@ -30,13 +30,12 @@ import (
var adminServerLdapTestConfig = map[string]interface{}{
common.ExtEndpoint: "host01.com",
common.AUTHMode: "ldap_auth",
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "root",
common.MySQLPassword: "root123",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry",
//config.SelfRegistration: true,
common.LDAPURL: "ldap://127.0.0.1",
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
@ -53,13 +52,12 @@ var adminServerLdapTestConfig = map[string]interface{}{
var adminServerDefaultConfigWithVerifyCert = map[string]interface{}{
common.ExtEndpoint: "https://host01.com",
common.AUTHMode: common.LDAPAuth,
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "root",
common.MySQLPassword: "root123",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry",
common.SelfRegistration: true,
common.LDAPURL: "ldap://127.0.0.1:389",
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",

View File

@ -26,13 +26,12 @@ import (
var adminServerDefaultConfig = map[string]interface{}{
common.ExtEndpoint: "https://host01.com",
common.AUTHMode: common.DBAuth,
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "user01",
common.MySQLPassword: "password",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry",
common.SelfRegistration: true,
common.LDAPURL: "ldap://127.0.0.1",
common.LDAPSearchDN: "uid=searchuser,ou=people,dc=mydomain,dc=com",
@ -65,10 +64,10 @@ var adminServerDefaultConfig = map[string]interface{}{
common.WithNotary: false,
common.WithClair: false,
common.ClairDBUsername: "postgres",
common.ClairDBHost: "postgres",
common.ClairDBHost: "postgresql",
common.ClairDB: "postgres",
common.ClairDBPort: 5432,
common.ClairDBPassword: "password",
common.ClairDBPassword: "root123",
common.UAAClientID: "testid",
common.UAAClientSecret: "testsecret",
common.UAAEndpoint: "10.192.168.5",

View File

@ -25,32 +25,32 @@ import (
// InitDatabaseFromEnv is used to initialize database for testing
func InitDatabaseFromEnv() {
dbHost := os.Getenv("MYSQL_HOST")
dbHost := os.Getenv("POSTGRESQL_HOST")
if len(dbHost) == 0 {
log.Fatalf("environment variable MYSQL_HOST is not set")
log.Fatalf("environment variable POSTGRESQL_HOST is not set")
}
dbPortStr := os.Getenv("MYSQL_PORT")
dbUser := os.Getenv("POSTGRESQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable POSTGRESQL_USR is not set")
}
dbPortStr := os.Getenv("POSTGRESQL_PORT")
if len(dbPortStr) == 0 {
log.Fatalf("environment variable MYSQL_PORT is not set")
log.Fatalf("environment variable POSTGRESQL_PORT is not set")
}
dbPort, err := strconv.Atoi(dbPortStr)
if err != nil {
log.Fatalf("invalid MYSQL_PORT: %v", err)
}
dbUser := os.Getenv("MYSQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable MYSQL_USR is not set")
log.Fatalf("invalid POSTGRESQL_PORT: %v", err)
}
dbPassword := os.Getenv("MYSQL_PWD")
dbDatabase := os.Getenv("MYSQL_DATABASE")
dbPassword := os.Getenv("POSTGRESQL_PWD")
dbDatabase := os.Getenv("POSTGRESQL_DATABASE")
if len(dbDatabase) == 0 {
log.Fatalf("environment variable MYSQL_DATABASE is not set")
log.Fatalf("environment variable POSTGRESQL_DATABASE is not set")
}
database := &models.Database{
Type: "mysql",
MySQL: &models.MySQL{
Type: "postgresql",
PostGreSQL: &models.PostGreSQL{
Host: dbHost,
Port: dbPort,
Username: dbUser,
@ -59,7 +59,7 @@ func InitDatabaseFromEnv() {
},
}
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
if err := dao.InitDatabase(database); err != nil {
log.Fatalf("failed to initialize database: %v", err)

View File

@ -158,13 +158,13 @@ func (c *Context) GetLogger() logger.Interface {
func getDBFromConfig(cfg map[string]interface{}) *models.Database {
database := &models.Database{}
database.Type = cfg[common.DatabaseType].(string)
mysql := &models.MySQL{}
mysql.Host = cfg[common.MySQLHost].(string)
mysql.Port = int(cfg[common.MySQLPort].(float64))
mysql.Username = cfg[common.MySQLUsername].(string)
mysql.Password = cfg[common.MySQLPassword].(string)
mysql.Database = cfg[common.MySQLDatabase].(string)
database.MySQL = mysql
postgresql := &models.PostGreSQL{}
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
postgresql.Port = int(cfg[common.PostGreSQLPort].(float64))
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
database.PostGreSQL = postgresql
return database
}

View File

@ -307,7 +307,7 @@ func validateCfg(c map[string]interface{}) (bool, error) {
return false, fmt.Errorf("invalid %s: %d", k, n)
}
if (k == common.EmailPort ||
k == common.MySQLPort) && n > 65535 {
k == common.PostGreSQLPort) && n > 65535 {
return false, fmt.Errorf("invalid %s: %d", k, n)
}
}

View File

@ -919,13 +919,13 @@ func (a testapi) UsersPut(userID int, profile apilib.UserProfile, authInfo usrIn
}
//Update a registered user to be an administrator of Harbor.
func (a testapi) UsersToggleAdminRole(userID int, authInfo usrInfo, hasAdminRole int32) (int, error) {
func (a testapi) UsersToggleAdminRole(userID int, authInfo usrInfo, hasAdminRole bool) (int, error) {
_sling := sling.New().Put(a.basePath)
// create path and map variables
path := "/api/users/" + fmt.Sprintf("%d", userID) + "/sysadmin"
_sling = _sling.Path(path)
type QueryParams struct {
HasAdminRole int32 `json:"has_admin_role,omitempty"`
HasAdminRole bool `json:"has_admin_role,omitempty"`
}
_sling = _sling.BodyJSON(&QueryParams{HasAdminRole: hasAdminRole})

View File

@ -122,6 +122,10 @@ func (pma *ProjectMemberAPI) Post() {
var request models.MemberReq
pma.DecodeJSONReq(&request)
pmid, err := AddOrUpdateProjectMember(projectID, request)
if err == auth.ErrorGroupNotExist || err == auth.ErrorUserNotExist {
pma.HandleNotFound(fmt.Sprintf("Failed to add project member, error: %v", err))
return
}
if err != nil {
pma.HandleInternalServerError(fmt.Sprintf("Failed to add project member, error: %v", err))
return

View File

@ -114,6 +114,20 @@ func TestProjectMemberAPI_Post(t *testing.T) {
},
code: http.StatusCreated,
},
&codeCheckingCase{
request: &testingRequest{
method: http.MethodPost,
url: "/api/projects/1/members",
bodyJSON: &models.MemberReq{
Role: 1,
MemberUser: models.User{
Username: "notexistuser",
},
},
credential: admin,
},
code: http.StatusNotFound,
},
&codeCheckingCase{
request: &testingRequest{
method: http.MethodPost,

View File

@ -252,6 +252,8 @@ func (pa *RepPolicyAPI) Delete() {
count, err := dao.GetTotalCountOfRepJobs(&models.RepJobQuery{
PolicyID: id,
Statuses: []string{models.JobRunning, models.JobRetrying, models.JobPending},
// only get the transfer and delete jobs, do not get schedule job
Operations: []string{models.RepOpTransfer, models.RepOpDelete},
})
if err != nil {
log.Errorf("failed to filter jobs of policy %d: %v", id, err)

View File

@ -15,10 +15,11 @@ package api
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/api"
"github.com/vmware/harbor/tests/apitests/apilib"
"testing"
"github.com/astaxie/beego"
)
@ -306,7 +307,7 @@ func TestUsersToggleAdminRole(t *testing.T) {
assert := assert.New(t)
apiTest := newHarborAPI()
//case 1: toggle user2 admin role without admin auth
code, err := apiTest.UsersToggleAdminRole(testUser0002ID, *testUser0002Auth, int32(1))
code, err := apiTest.UsersToggleAdminRole(testUser0002ID, *testUser0002Auth, true)
if err != nil {
t.Error("Error occured while toggle user admin role", err.Error())
t.Log(err)
@ -314,7 +315,7 @@ func TestUsersToggleAdminRole(t *testing.T) {
assert.Equal(403, code, "Toggle user admin role status should be 403")
}
//case 2: toggle user2 admin role with admin auth
code, err = apiTest.UsersToggleAdminRole(testUser0002ID, *admin, int32(1))
code, err = apiTest.UsersToggleAdminRole(testUser0002ID, *admin, true)
if err != nil {
t.Error("Error occured while toggle user admin role", err.Error())
t.Log(err)

View File

@ -27,13 +27,12 @@ var l = NewUserLock(2 * time.Second)
var adminServerLdapTestConfig = map[string]interface{}{
common.ExtEndpoint: "host01.com",
common.AUTHMode: "ldap_auth",
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "root",
common.MySQLPassword: "root123",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry",
common.LDAPURL: "ldap://127.0.0.1",
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
common.LDAPSearchPwd: "admin",

View File

@ -31,6 +31,12 @@ const frozenTime time.Duration = 1500 * time.Millisecond
var lock = NewUserLock(frozenTime)
// ErrorUserNotExist ...
var ErrorUserNotExist = errors.New("User does not exist")
// ErrorGroupNotExist ...
var ErrorGroupNotExist = errors.New("Group does not exist")
//ErrAuth is the type of error to indicate a failed authentication due to user's error.
type ErrAuth struct {
details string
@ -200,6 +206,9 @@ func SearchGroup(groupKey string) (*models.UserGroup, error) {
// SearchAndOnBoardUser ... Search user and OnBoard user; if the user exists, return the ID of the current user.
func SearchAndOnBoardUser(username string) (int, error) {
user, err := SearchUser(username)
if user == nil {
return 0, ErrorUserNotExist
}
if err != nil {
return 0, err
}
@ -215,6 +224,9 @@ func SearchAndOnBoardUser(username string) (int, error) {
// SearchAndOnBoardGroup ... if altGroupName is not empty, take the altGroupName as groupName in harbor DB
func SearchAndOnBoardGroup(groupKey, altGroupName string) (int, error) {
userGroup, err := SearchGroup(groupKey)
if userGroup == nil {
return 0, ErrorGroupNotExist
}
if err != nil {
return 0, err
}

View File

@ -31,13 +31,12 @@ import (
var adminServerTestConfig = map[string]interface{}{
common.ExtEndpoint: "host01.com",
common.AUTHMode: "db_auth",
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "root",
common.MySQLPassword: "root123",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry",
//config.SelfRegistration: true,
common.LDAPURL: "ldap://127.0.0.1",
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",

View File

@ -34,13 +34,12 @@ import (
var adminServerLdapTestConfig = map[string]interface{}{
common.ExtEndpoint: "host01.com",
common.AUTHMode: "ldap_auth",
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "root",
common.MySQLPassword: "root123",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry",
//config.SelfRegistration: true,
common.LDAPURL: "ldap://127.0.0.1",
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
@ -113,14 +112,14 @@ func TestMain(m *testing.M) {
"insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into project (name, owner_id) values ('member_test_01', 1)",
"insert into user_group (group_name, group_type, group_property) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
"update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
}
clearSqls := []string{
"delete from project where name='member_test_01'",
"delete from user where username='member_test_01' or username='pm_sample'",
"delete from harbor_user where username='member_test_01' or username='pm_sample'",
"delete from user_group",
"delete from project_member",
}

View File

@ -15,54 +15,20 @@
package uaa
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/common/utils/test"
utilstest "github.com/vmware/harbor/src/common/utils/test"
"github.com/vmware/harbor/src/common/utils/uaa"
"github.com/vmware/harbor/src/ui/config"
"os"
"strconv"
"testing"
)
func TestMain(m *testing.M) {
dbHost := os.Getenv("MYSQL_HOST")
if len(dbHost) == 0 {
log.Fatalf("environment variable MYSQL_HOST is not set")
}
dbUser := os.Getenv("MYSQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable MYSQL_USR is not set")
}
dbPortStr := os.Getenv("MYSQL_PORT")
if len(dbPortStr) == 0 {
log.Fatalf("environment variable MYSQL_PORT is not set")
}
dbPort, err := strconv.Atoi(dbPortStr)
if err != nil {
log.Fatalf("invalid MYSQL_PORT: %v", err)
}
dbPassword := os.Getenv("MYSQL_PWD")
dbDatabase := os.Getenv("MYSQL_DATABASE")
if len(dbDatabase) == 0 {
log.Fatalf("environment variable MYSQL_DATABASE is not set")
}
database := &models.Database{
Type: "mysql",
MySQL: &models.MySQL{
Host: dbHost,
Port: dbPort,
Username: dbUser,
Password: dbPassword,
Database: dbDatabase,
},
}
dao.InitDatabase(database)
test.InitDatabaseFromEnv()
server, err := utilstest.NewAdminserver(nil)
if err != nil {
panic(err)
@ -93,7 +59,7 @@ func TestMain(m *testing.M) {
if err != nil {
panic(err)
}
err = dao.ClearTable("user")
err = dao.ClearTable("harbor_user")
if err != nil {
panic(err)
}

View File

@ -395,18 +395,17 @@ func Database() (*models.Database, error) {
if err != nil {
return nil, err
}
database := &models.Database{}
database.Type = cfg[common.DatabaseType].(string)
mysql := &models.MySQL{}
mysql.Host = cfg[common.MySQLHost].(string)
mysql.Port = int(cfg[common.MySQLPort].(float64))
mysql.Username = cfg[common.MySQLUsername].(string)
mysql.Password = cfg[common.MySQLPassword].(string)
mysql.Database = cfg[common.MySQLDatabase].(string)
database.MySQL = mysql
sqlite := &models.SQLite{}
sqlite.File = cfg[common.SQLiteFile].(string)
database.SQLite = sqlite
postgresql := &models.PostGreSQL{}
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
postgresql.Port = int(cfg[common.PostGreSQLPort].(float64))
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
database.PostGreSQL = postgresql
return database, nil
}

View File

@ -72,7 +72,7 @@ func (cc *CommonController) Login() {
cc.SetSession("userId", user.UserID)
cc.SetSession("username", user.Username)
cc.SetSession("isSysAdmin", user.HasAdminRole == 1)
cc.SetSession("isSysAdmin", user.HasAdminRole)
}
// LogOut Habor UI

View File

@ -33,13 +33,12 @@ func TestReadonlyFilter(t *testing.T) {
common.AUTHMode: "db_auth",
common.CfgExpiration: 5,
common.TokenExpiration: 30,
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "root",
common.MySQLPassword: "root123",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLDatabase: "registry",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLPassword: "root123",
common.PostGreSQLUsername: "postgres",
common.ReadOnly: true,
}
adminServer, err := utilstest.NewAdminserver(defaultConfig)

View File

@ -234,7 +234,7 @@ func (s *sessionReqCtxModifier) Modify(ctx *beegoctx.Context) bool {
}
isSysAdmin := ctx.Input.Session("isSysAdmin")
if isSysAdmin != nil && isSysAdmin.(bool) {
user.HasAdminRole = 1
user.HasAdminRole = true
}
log.Debug("using local database project manager")

View File

@ -27,7 +27,7 @@ import (
"github.com/vmware/harbor/src/ui/promgr/pmsdriver"
)
const dupProjectPattern = `Duplicate entry '\w+' for key 'name'`
const dupProjectPattern = `duplicate key value violates unique constraint \"project_name_key\"`
type driver struct {
}
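
The pattern now has to match PostgreSQL's unique-constraint violation text rather than MySQL's "Duplicate entry" message. A sketch of how the check can be applied, assuming lib/pq surfaces the violation as shown in the sample message below (the isDupProjectName helper and the main function are illustrative, not part of the driver):

package main

import (
	"errors"
	"fmt"
	"regexp"
)

// dupProjectPattern mirrors the constant defined in the driver above.
const dupProjectPattern = `duplicate key value violates unique constraint \"project_name_key\"`

// isDupProjectName reports whether an insert failed because the project
// name already exists.
func isDupProjectName(err error) bool {
	if err == nil {
		return false
	}
	return regexp.MustCompile(dupProjectPattern).MatchString(err.Error())
}

func main() {
	// Hypothetical error text as lib/pq might report it.
	err := errors.New(`pq: duplicate key value violates unique constraint "project_name_key"`)
	fmt.Println(isDupProjectName(err)) // true
}
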

View File

@ -16,57 +16,16 @@ package local
import (
"os"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
errutil "github.com/vmware/harbor/src/common/utils/error"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/common/utils/test"
)
func TestMain(m *testing.M) {
dbHost := os.Getenv("MYSQL_HOST")
if len(dbHost) == 0 {
log.Fatalf("environment variable MYSQL_HOST is not set")
}
dbPortStr := os.Getenv("MYSQL_PORT")
if len(dbPortStr) == 0 {
log.Fatalf("environment variable MYSQL_PORT is not set")
}
dbPort, err := strconv.Atoi(dbPortStr)
if err != nil {
log.Fatalf("invalid MYSQL_PORT: %v", err)
}
dbUser := os.Getenv("MYSQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable MYSQL_USR is not set")
}
dbPassword := os.Getenv("MYSQL_PWD")
dbDatabase := os.Getenv("MYSQL_DATABASE")
if len(dbDatabase) == 0 {
log.Fatalf("environment variable MYSQL_DATABASE is not set")
}
database := &models.Database{
Type: "mysql",
MySQL: &models.MySQL{
Host: dbHost,
Port: dbPort,
Username: dbUser,
Password: dbPassword,
Database: dbDatabase,
},
}
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
if err := dao.InitDatabase(database); err != nil {
log.Fatalf("failed to initialize database: %v", err)
}
test.InitDatabaseFromEnv()
os.Exit(m.Run())
}

View File

@ -116,13 +116,12 @@ func TestPMSPolicyChecker(t *testing.T) {
common.WithNotary: true,
common.CfgExpiration: 5,
common.TokenExpiration: 30,
common.DatabaseType: "mysql",
common.MySQLHost: "127.0.0.1",
common.MySQLPort: 3306,
common.MySQLUsername: "root",
common.MySQLPassword: "root123",
common.MySQLDatabase: "registry",
common.SQLiteFile: "/tmp/registry.db",
common.DatabaseType: "postgresql",
common.PostGreSQLHOST: "127.0.0.1",
common.PostGreSQLPort: 5432,
common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry",
}
adminServer, err := utilstest.NewAdminserver(defaultConfigAdmiral)
if err != nil {

View File

@ -1,6 +1,6 @@
{
"name": "harbor-ui",
"version": "0.7.5",
"version": "0.7.10",
"description": "Harbor shared UI components based on Clarity and Angular4",
"scripts": {
"start": "ng serve --host 0.0.0.0 --port 4500 --proxy-config proxy.config.json",

View File

@ -1,6 +1,6 @@
{
"name": "harbor-ui",
"version": "0.7.5",
"version": "0.7.10",
"description": "Harbor shared UI components based on Clarity and Angular4",
"author": "VMware",
"module": "index.js",

View File

@ -69,7 +69,7 @@ export const CREATE_EDIT_RULE_TEMPLATE: string = `
</div>
</div>
<label *ngIf="noEndpointInfo.length != 0" class="colorRed alertLabel">{{noEndpointInfo | translate}}</label>
<span class="alertLabel goLink" *ngIf="noEndpointInfo.length != 0" (click)="goRegistry()">{{'SIDE_NAV.SYSTEM_MGMT.REGISTRY' | translate}}</span>
<span class="alertLabel goLink" *ngIf="noEndpointInfo.length != 0" (click)="goRegistry()">{{'REPLICATION.ENDPOINTS' | translate}}</span>
</div>
<!--Trigger-->

View File

@ -140,8 +140,6 @@ export interface HarborModuleConfig {
}
/**
*
*
* @export
* @param {AppConfigService} configService
* @returns

View File

@ -73,16 +73,16 @@ export const REPOSITORY_GRIDVIEW_TEMPLATE = `
</div>
<div class="card-footer">
<clr-dropdown [clrCloseMenuOnItemClick]="false">
<button *ngIf="withAdmiral" type="button" class="btn btn-link" (click)="provisionItemEvent($event, item)">{{'REPOSITORY.DEPLOY' | translate}}</button>
<button type="button" class="btn btn-link" (click)="$event.stopPropagation()" clrDropdownTrigger>
<button *ngIf="withAdmiral" type="button" class="btn btn-link" (click)="provisionItemEvent($event, item)" [disabled]="!hasProjectAdminRole">{{'REPOSITORY.DEPLOY' | translate}}</button>
<button type="button" class="btn btn-link" (click)="$event.stopPropagation()" [disabled]="!hasProjectAdminRole" clrDropdownTrigger>
{{'REPOSITORY.ACTION' | translate}}
<clr-icon shape="caret down"></clr-icon>
</button>
<clr-dropdown-menu clrPosition="top-left" *clrIfOpen>
<button *ngIf="withAdmiral" type="button" class="btn btn-link" clrDropdownItem (click)="itemAddInfoEvent($event, item)">
<button *ngIf="withAdmiral" type="button" class="btn btn-link" clrDropdownItem (click)="itemAddInfoEvent($event, item)" [disabled]="!hasProjectAdminRole">
{{'REPOSITORY.ADDITIONAL_INFO' | translate}}
</button>
<button type="button" class="btn btn-link" clrDropdownItem (click)="deleteItemEvent($event, item)">
<button type="button" class="btn btn-link" clrDropdownItem (click)="deleteItemEvent($event, item)" [disabled]="!hasProjectAdminRole">
{{'REPOSITORY.DELETE' | translate}}
</button>
</clr-dropdown-menu>

View File

@ -9,7 +9,7 @@ import { REPOSITORY_GRIDVIEW_TEMPLATE } from './repository-gridview.component.ht
import { REPOSITORY_GRIDVIEW_STYLE } from './repository-gridview.component.css';
import { Repository, SystemInfo, SystemInfoService, RepositoryService, RequestQueryParams, RepositoryItem, TagService } from '../service/index';
import { ErrorHandler } from '../error-handler/error-handler';
import { toPromise, CustomComparator , DEFAULT_PAGE_SIZE, calculatePage, doFiltering, doSorting} from '../utils';
import { toPromise, CustomComparator , DEFAULT_PAGE_SIZE, calculatePage, doFiltering, doSorting, clone} from '../utils';
import { ConfirmationState, ConfirmationTargets, ConfirmationButtons } from '../shared/shared.const';
import { ConfirmationDialogComponent } from '../confirmation-dialog/confirmation-dialog.component';
import { ConfirmationMessage } from '../confirmation-dialog/confirmation-message';
@ -266,12 +266,16 @@ export class RepositoryGridviewComponent implements OnChanges, OnInit {
provisionItemEvent(evt: any, repo: RepositoryItem): void {
evt.stopPropagation();
this.repoProvisionEvent.emit(repo);
let repoCopy = clone(repo)
repoCopy.name = this.registryUrl + ':443/' + repoCopy.name;
this.repoProvisionEvent.emit(repoCopy);
}
itemAddInfoEvent(evt: any, repo: RepositoryItem): void {
evt.stopPropagation();
this.addInfoEvent.emit(repo);
let repoCopy = clone(repo)
repoCopy.name = this.registryUrl + ':443/' + repoCopy.name;
this.addInfoEvent.emit(repoCopy);
}
deleteItemEvent(evt: any, item: RepositoryItem): void {

View File

@ -88,6 +88,13 @@ export interface IServiceConfig {
*/
projectPolicyEndpoint?: string;
/**
* The base endpoint of service used to handle projects
* @type {string}
* @memberOf IServiceConfig
*/
projectBaseEndpoint?: string;
/**
* To determine whether or not to enable the i18 multiple languages supporting.
*

View File

@ -1,9 +1,9 @@
import { Observable } from 'rxjs/Observable';
import { Injectable, Inject } from '@angular/core';
import 'rxjs/add/observable/of';
import { Http, Headers, RequestOptions } from '@angular/http';
import { SERVICE_CONFIG, IServiceConfig } from '../service.config';
import 'rxjs/add/observable/of';
import { SERVICE_CONFIG, IServiceConfig } from '../service.config';
import { Project } from '../project-policy-config/project';
import { ProjectPolicy } from '../project-policy-config/project-policy-config.component';
import {HTTP_JSON_OPTIONS, HTTP_GET_OPTIONS, buildHttpRequestOptions} from "../utils";
@ -40,6 +40,18 @@ export abstract class ProjectService {
*/
abstract updateProjectPolicy(projectId: number | string, projectPolicy: ProjectPolicy): Observable<any> | Promise<any> | any;
/**
* Get all projects
*
* @abstract
* @param {string} name
* @param {number} isPublic
* @param {number} page
* @param {number} pageSize
* @returns {(Observable<any> | Promise<any> | any)}
*
* @memberOf ProjectService
*/
abstract listProjects(name: string, isPublic: number, page?: number, pageSize?: number): Observable<Project[]> | Promise<Project[]> | Project[];
}
@ -64,14 +76,15 @@ export class ProjectDefaultService extends ProjectService {
if (!projectId) {
return Promise.reject('Bad argument');
}
let baseUrl: string = this.config.projectBaseEndpoint ? this.config.projectBaseEndpoint : '/api/projects';
return this.http
.get(`/api/projects/${projectId}`, HTTP_GET_OPTIONS)
.get(`${baseUrl}/${projectId}`, HTTP_GET_OPTIONS)
.map(response => response.json())
.catch(error => Observable.throw(error));
}
listProjects(name: string, isPublic: number, page?: number, pageSize?: number): Observable<Project[]> | Promise<Project[]> | Project[] {
public listProjects(name: string, isPublic: number, page?: number, pageSize?: number): Observable<Project[]> | Promise<Project[]> | Project[] {
let baseUrl: string = this.config.projectBaseEndpoint ? this.config.projectBaseEndpoint : '/api/projects';
let params = new RequestQueryParams();
if (page && pageSize) {
params.set('page', page + '');
@ -87,20 +100,24 @@ export class ProjectDefaultService extends ProjectService {
// let options = new RequestOptions({ headers: this.getHeaders, search: params });
return this.http
.get(`/api/projects`, buildHttpRequestOptions(params))
.get(baseUrl, buildHttpRequestOptions(params))
.map(response => response.json())
.catch(error => Observable.throw(error));
}
public updateProjectPolicy(projectId: number | string, projectPolicy: ProjectPolicy): any {
let baseUrl: string = this.config.projectBaseEndpoint ? this.config.projectBaseEndpoint : '/api/projects';
return this.http
.put(`/api/projects/${projectId}`, { 'metadata': {
.put(`${baseUrl}/${projectId}`, {
'metadata': {
'public': projectPolicy.Public ? 'true' : 'false',
'enable_content_trust': projectPolicy.ContentTrust ? 'true' : 'false',
'prevent_vul': projectPolicy.PreventVulImg ? 'true' : 'false',
'severity': projectPolicy.PreventVulImgSeverity,
'auto_scan': projectPolicy.ScanImgOnPush ? 'true' : 'false'
} }, HTTP_JSON_OPTIONS)
}
},
HTTP_JSON_OPTIONS)
.map(response => response.status)
.catch(error => Observable.throw(error));
}

View File

@ -147,7 +147,7 @@ export class TagDefaultService extends TagService {
return Promise.reject('Invalid parameters.');
}
let _addLabelToImageUrl = `/api/repositories/${repoName}/tags/${tagName}/labels`;
let _addLabelToImageUrl = `${this._baseUrl}/${repoName}/tags/${tagName}/labels`;
return this.http.post(_addLabelToImageUrl, {id: labelId}, HTTP_JSON_OPTIONS).toPromise()
.then(response => response.status)
.catch(error => Promise.reject(error));
@ -159,7 +159,7 @@ export class TagDefaultService extends TagService {
return Promise.reject('Invalid parameters.');
}
let _addLabelToImageUrl = `/api/repositories/${repoName}/tags/${tagName}/labels/${labelId}`;
let _addLabelToImageUrl = `${this._baseUrl}/${repoName}/tags/${tagName}/labels/${labelId}`;
return this.http.delete(_addLabelToImageUrl).toPromise()
.then(response => response.status)
.catch(error => Promise.reject(error));

View File

@ -285,6 +285,6 @@ export function isEmptyObject(obj: any): boolean {
* @returns {*}
*/
export function clone(srcObj: any): any {
if (!srcObj) return null;
if (!srcObj) { return null; }
return JSON.parse(JSON.stringify(srcObj));
}

View File

@ -30,7 +30,7 @@
"clarity-icons": "^0.10.27",
"clarity-ui": "^0.10.27",
"core-js": "^2.4.1",
"harbor-ui": "0.7.9",
"harbor-ui": "0.7.10",
"intl": "^1.2.5",
"mutationobserver-shim": "^0.3.2",
"ngx-cookie": "^1.0.0",

View File

@ -95,7 +95,7 @@ export class HarborShellComponent implements OnInit, OnDestroy {
public get isSystemAdmin(): boolean {
let account = this.session.getCurrentUser();
return account != null && account.has_admin_role > 0;
return account != null && account.has_admin_role;
}
public get isUserExisting(): boolean {

View File

@ -103,7 +103,7 @@ export class NavigatorComponent implements OnInit {
public get canDownloadCert(): boolean {
return this.session.getCurrentUser() &&
this.session.getCurrentUser().has_admin_role > 0 &&
this.session.getCurrentUser().has_admin_role &&
this.appConfigService.getConfig() &&
this.appConfigService.getConfig().has_ca_root;
}

View File

@ -70,7 +70,7 @@ export class ConfigurationComponent implements OnInit, OnDestroy {
public get hasAdminRole(): boolean {
return this.session.getCurrentUser() &&
this.session.getCurrentUser().has_admin_role > 0;
this.session.getCurrentUser().has_admin_role;
}
public get hasCAFile(): boolean {
@ -142,7 +142,7 @@ export class ConfigurationComponent implements OnInit, OnDestroy {
// First load
// Double confirm the current use has admin role
let currentUser = this.session.getCurrentUser();
if (currentUser && currentUser.has_admin_role > 0) {
if (currentUser && currentUser.has_admin_role) {
this.retrieveConfig();
}

View File

@ -97,7 +97,7 @@ export class ListProjectComponent implements OnDestroy {
if (account) {
switch (this.appConfigService.getConfig().project_creation_restriction) {
case "adminonly":
return (account.has_admin_role === 1);
return (account.has_admin_role);
case "everyone":
return true;
}
@ -107,7 +107,7 @@ export class ListProjectComponent implements OnDestroy {
public get isSystemAdmin(): boolean {
let account = this.session.getCurrentUser();
return account != null && account.has_admin_role > 0;
return account != null && account.has_admin_role;
}
public get canDelete(): boolean {

View File

@ -19,10 +19,10 @@
"password": "",
"realname": "",
"comment": "",
"deleted": 0,
"deleted": false,
"role_name": "projectAdmin",
"role_id": 1,
"has_admin_role": 0,
"has_admin_role": false,
"reset_uuid": "",
"creation_time": "0001-01-01T00:00:00Z",
"update_time": "0001-01-01T00:00:00Z"

View File

@ -52,7 +52,7 @@ export class ProjectDetailComponent {
public get isSystemAdmin(): boolean {
let account = this.sessionService.getCurrentUser();
return account && account.has_admin_role > 0;
return account && account.has_admin_role;
}
public get isSProjectAdmin(): boolean {

Some files were not shown because too many files have changed in this diff