mirror of
https://github.com/goharbor/harbor.git
synced 2024-11-23 02:35:17 +01:00
Merge remote-tracking branch 'upstream/master' into labelFilter
This commit is contained in:
commit
08f750b04d
19
.travis.yml
19
.travis.yml
@ -13,11 +13,11 @@ services:
|
|||||||
dist: trusty
|
dist: trusty
|
||||||
|
|
||||||
env:
|
env:
|
||||||
MYSQL_HOST: localhost
|
POSTGRESQL_HOST: localhost
|
||||||
MYSQL_PORT: 3306
|
POSTGRESQL_PORT: 5432
|
||||||
MYSQL_USR: root
|
POSTGRESQL_USR: postgres
|
||||||
MYSQL_PWD: root123
|
POSTGRESQL_PWD: root123
|
||||||
MYSQL_DATABASE: registry
|
POSTGRESQL_DATABASE: registry
|
||||||
SQLITE_FILE: /tmp/registry.db
|
SQLITE_FILE: /tmp/registry.db
|
||||||
ADMINSERVER_URL: http://127.0.0.1:8888
|
ADMINSERVER_URL: http://127.0.0.1:8888
|
||||||
DOCKER_COMPOSE_VERSION: 1.7.1
|
DOCKER_COMPOSE_VERSION: 1.7.1
|
||||||
@ -53,7 +53,7 @@ install:
|
|||||||
# - mysql --version
|
# - mysql --version
|
||||||
- go get -d github.com/docker/distribution
|
- go get -d github.com/docker/distribution
|
||||||
- go get -d github.com/docker/libtrust
|
- go get -d github.com/docker/libtrust
|
||||||
- go get -d github.com/go-sql-driver/mysql
|
- go get -d github.com/lib/pq
|
||||||
- go get github.com/golang/lint/golint
|
- go get github.com/golang/lint/golint
|
||||||
- go get github.com/GeertJohan/fgt
|
- go get github.com/GeertJohan/fgt
|
||||||
|
|
||||||
@ -80,12 +80,11 @@ script:
|
|||||||
- sudo mv ./tests/ca.crt /etc/ui/ca/
|
- sudo mv ./tests/ca.crt /etc/ui/ca/
|
||||||
- sudo mkdir -p /harbor
|
- sudo mkdir -p /harbor
|
||||||
- sudo mv ./VERSION /harbor/UIVERSION
|
- sudo mv ./VERSION /harbor/UIVERSION
|
||||||
- sudo service mysql stop
|
- sudo service postgresql stop
|
||||||
- sudo make run_clarity_ut CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.4.0
|
- sudo make run_clarity_ut CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.4.0
|
||||||
- cat ./src/ui_ng/lib/npm-ut-test-results
|
- cat ./src/ui_ng/lib/npm-ut-test-results
|
||||||
- sudo ./tests/testprepare.sh
|
- sudo ./tests/testprepare.sh
|
||||||
- sudo make -f make/photon/Makefile -e MARIADBVERSION=10.2.10 -e VERSIONTAG=dev
|
- sudo make -f make/photon/Makefile _build_postgresql _build_db _build_registry -e VERSIONTAG=dev -e CLAIRDBVERSION=dev -e REGISTRYVERSION=v2.6.2
|
||||||
- sudo make -f make/photon/Makefile _build_registry -e REGISTRYVERSION=v2.6.2 -e VERSIONTAG=dev
|
|
||||||
- sudo sed -i 's/__reg_version__/v2.6.2-dev/g' ./make/docker-compose.test.yml
|
- sudo sed -i 's/__reg_version__/v2.6.2-dev/g' ./make/docker-compose.test.yml
|
||||||
- sudo sed -i 's/__version__/dev/g' ./make/docker-compose.test.yml
|
- sudo sed -i 's/__version__/dev/g' ./make/docker-compose.test.yml
|
||||||
- sudo mkdir -p ./make/common/config/registry/
|
- sudo mkdir -p ./make/common/config/registry/
|
||||||
@ -93,7 +92,7 @@ script:
|
|||||||
- sudo docker-compose -f ./make/docker-compose.test.yml up -d
|
- sudo docker-compose -f ./make/docker-compose.test.yml up -d
|
||||||
- go list ./... | grep -v -E 'vendor|tests|test' | xargs -L1 fgt golint
|
- go list ./... | grep -v -E 'vendor|tests|test' | xargs -L1 fgt golint
|
||||||
- go list ./... | grep -v -E 'vendor|tests|test' | xargs -L1 go vet
|
- go list ./... | grep -v -E 'vendor|tests|test' | xargs -L1 go vet
|
||||||
- export MYSQL_HOST=$IP
|
- export POSTGRESQL_HOST=$IP
|
||||||
- export REGISTRY_URL=$IP:5000
|
- export REGISTRY_URL=$IP:5000
|
||||||
- echo $REGISTRY_URL
|
- echo $REGISTRY_URL
|
||||||
- ./tests/pushimage.sh
|
- ./tests/pushimage.sh
|
||||||
|
6
Makefile
6
Makefile
@ -226,14 +226,13 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
|
|||||||
DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
||||||
|
|
||||||
ifeq ($(NOTARYFLAG), true)
|
ifeq ($(NOTARYFLAG), true)
|
||||||
DOCKERSAVE_PARA+= vmware/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) vmware/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG) \
|
DOCKERSAVE_PARA+= vmware/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) vmware/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG)
|
||||||
vmware/mariadb-photon:$(MARIADBVERSION)
|
|
||||||
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
|
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
|
||||||
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
|
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
|
||||||
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
|
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
|
||||||
endif
|
endif
|
||||||
ifeq ($(CLAIRFLAG), true)
|
ifeq ($(CLAIRFLAG), true)
|
||||||
DOCKERSAVE_PARA+= vmware/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG) vmware/postgresql-photon:$(CLAIRDBVERSION)
|
DOCKERSAVE_PARA+= vmware/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG)
|
||||||
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
|
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
|
||||||
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
|
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
|
||||||
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
|
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
|
||||||
@ -292,6 +291,7 @@ modify_composefile: modify_composefile_notary modify_composefile_clair
|
|||||||
@cp $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
|
@cp $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSETPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
|
||||||
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
|
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
|
||||||
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
@$(SEDCMD) -i 's/__version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
||||||
|
@$(SEDCMD) -i 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
||||||
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
||||||
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
|
@$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
|
||||||
@$(SEDCMD) -i 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
@$(SEDCMD) -i 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
||||||
|
@ -54,7 +54,6 @@ data:
|
|||||||
CLAIR_DB_PORT: "5432"
|
CLAIR_DB_PORT: "5432"
|
||||||
CLAIR_DB: "{{ .Values.clair.postgresDatabase }}"
|
CLAIR_DB: "{{ .Values.clair.postgresDatabase }}"
|
||||||
CLAIR_DB_USERNAME: "{{ .Values.clair.postgresUser }}"
|
CLAIR_DB_USERNAME: "{{ .Values.clair.postgresUser }}"
|
||||||
CLAIR_DB_PASSWORD: "{{ .Values.clair.postgresPassword }}"
|
|
||||||
UAA_ENDPOINT: ""
|
UAA_ENDPOINT: ""
|
||||||
UAA_CLIENTID: ""
|
UAA_CLIENTID: ""
|
||||||
UAA_CLIENTSECRET: ""
|
UAA_CLIENTSECRET: ""
|
||||||
|
@ -13,7 +13,9 @@ data:
|
|||||||
MYSQL_PWD: {{ .Values.mysql.pass | b64enc | quote }}
|
MYSQL_PWD: {{ .Values.mysql.pass | b64enc | quote }}
|
||||||
JOBSERVICE_SECRET: {{ .Values.jobservice.secret | b64enc | quote }}
|
JOBSERVICE_SECRET: {{ .Values.jobservice.secret | b64enc | quote }}
|
||||||
UI_SECRET: {{ .Values.ui.secret | b64enc | quote }}
|
UI_SECRET: {{ .Values.ui.secret | b64enc | quote }}
|
||||||
|
{{- if eq .Values.adminserver.authenticationMode "ldap_auth" }}
|
||||||
|
LDAP_SEARCH_PWD: {{ .Values.adminserver.ldap.searchPwd | b64enc | quote }}
|
||||||
|
{{- end }}
|
||||||
{{ if .Values.clair.enabled }}
|
{{ if .Values.clair.enabled }}
|
||||||
CLAIR_DB_PASSWORD: {{ .Values.clair.postgresPassword | b64enc | quote }}
|
CLAIR_DB_PASSWORD: {{ .Values.clair.postgresPassword | b64enc | quote }}
|
||||||
{{ end }}
|
{{ end }}
|
||||||
#LDAP_SEARCH_PWD: not-a-secure-password
|
|
||||||
|
@ -72,6 +72,7 @@ adminserver:
|
|||||||
ldap:
|
ldap:
|
||||||
url: "ldaps://ldapserver"
|
url: "ldaps://ldapserver"
|
||||||
searchDN: ""
|
searchDN: ""
|
||||||
|
searchPassword: ""
|
||||||
baseDN: ""
|
baseDN: ""
|
||||||
filter: "(objectClass=person)"
|
filter: "(objectClass=person)"
|
||||||
uid: "uid"
|
uid: "uid"
|
||||||
|
@ -513,7 +513,7 @@ paths:
|
|||||||
'403':
|
'403':
|
||||||
description: User in session does not have permission to the project.
|
description: User in session does not have permission to the project.
|
||||||
'404':
|
'404':
|
||||||
description: Project does not exist.
|
description: Project does not exist, or the username does not found, or the user group does not found.
|
||||||
'500':
|
'500':
|
||||||
description: Unexpected internal errors.
|
description: Unexpected internal errors.
|
||||||
'/projects/{project_id}/members/{mid}':
|
'/projects/{project_id}/members/{mid}':
|
||||||
|
@ -12,16 +12,16 @@ LDAP_UID=$ldap_uid
|
|||||||
LDAP_SCOPE=$ldap_scope
|
LDAP_SCOPE=$ldap_scope
|
||||||
LDAP_TIMEOUT=$ldap_timeout
|
LDAP_TIMEOUT=$ldap_timeout
|
||||||
LDAP_VERIFY_CERT=$ldap_verify_cert
|
LDAP_VERIFY_CERT=$ldap_verify_cert
|
||||||
|
DATABASE_TYPE=postgresql
|
||||||
|
POSTGRESQL_HOST=$db_host
|
||||||
|
POSTGRESQL_PORT=$db_port
|
||||||
|
POSTGRESQL_USERNAME=$db_user
|
||||||
|
POSTGRESQL_PASSWORD=$db_password
|
||||||
|
POSTGRESQL_DATABASE=registry
|
||||||
LDAP_GROUP_BASEDN=$ldap_group_basedn
|
LDAP_GROUP_BASEDN=$ldap_group_basedn
|
||||||
LDAP_GROUP_FILTER=$ldap_group_filter
|
LDAP_GROUP_FILTER=$ldap_group_filter
|
||||||
LDAP_GROUP_GID=$ldap_group_gid
|
LDAP_GROUP_GID=$ldap_group_gid
|
||||||
LDAP_GROUP_SCOPE=$ldap_group_scope
|
LDAP_GROUP_SCOPE=$ldap_group_scope
|
||||||
DATABASE_TYPE=mysql
|
|
||||||
MYSQL_HOST=$db_host
|
|
||||||
MYSQL_PORT=$db_port
|
|
||||||
MYSQL_USR=$db_user
|
|
||||||
MYSQL_PWD=$db_password
|
|
||||||
MYSQL_DATABASE=registry
|
|
||||||
REGISTRY_URL=$registry_url
|
REGISTRY_URL=$registry_url
|
||||||
TOKEN_SERVICE_URL=$token_service_url
|
TOKEN_SERVICE_URL=$token_service_url
|
||||||
EMAIL_HOST=$email_host
|
EMAIL_HOST=$email_host
|
||||||
|
@ -1 +1 @@
|
|||||||
MYSQL_ROOT_PASSWORD=$db_password
|
POSTGRES_PASSWORD=$db_password
|
||||||
|
@ -1,7 +0,0 @@
|
|||||||
CREATE DATABASE IF NOT EXISTS `notaryserver`;
|
|
||||||
|
|
||||||
CREATE USER "server"@"notary-server.%" IDENTIFIED BY "";
|
|
||||||
|
|
||||||
GRANT
|
|
||||||
ALL PRIVILEGES ON `notaryserver`.*
|
|
||||||
TO "server"@"notary-server.%"
|
|
@ -1,7 +0,0 @@
|
|||||||
CREATE DATABASE IF NOT EXISTS `notarysigner`;
|
|
||||||
|
|
||||||
CREATE USER "signer"@"notary-signer.%" IDENTIFIED BY "";
|
|
||||||
|
|
||||||
GRANT
|
|
||||||
ALL PRIVILEGES ON `notarysigner`.*
|
|
||||||
TO "signer"@"notary-signer.%";
|
|
28
make/common/templates/notary/server-config.postgres.json
Normal file
28
make/common/templates/notary/server-config.postgres.json
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
{
|
||||||
|
"server": {
|
||||||
|
"http_addr": ":4443"
|
||||||
|
},
|
||||||
|
"trust_service": {
|
||||||
|
"type": "remote",
|
||||||
|
"hostname": "notarysigner",
|
||||||
|
"port": "7899",
|
||||||
|
"tls_ca_file": "./notary-signer-ca.crt",
|
||||||
|
"key_algorithm": "ecdsa"
|
||||||
|
},
|
||||||
|
"logging": {
|
||||||
|
"level": "debug"
|
||||||
|
},
|
||||||
|
"storage": {
|
||||||
|
"backend": "postgres",
|
||||||
|
"db_url": "postgres://server:password@postgresql:5432/notaryserver?sslmode=disable"
|
||||||
|
},
|
||||||
|
"auth": {
|
||||||
|
"type": "token",
|
||||||
|
"options": {
|
||||||
|
"realm": "$token_endpoint/service/token",
|
||||||
|
"service": "harbor-notary",
|
||||||
|
"issuer": "harbor-token-issuer",
|
||||||
|
"rootcertbundle": "/etc/notary/root.crt"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
2
make/common/templates/notary/server_env
Normal file
2
make/common/templates/notary/server_env
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
MIGRATIONS_PATH=migrations/server/postgresql
|
||||||
|
DB_URL=postgres://server:password@postgresql:5432/notaryserver?sslmode=disable
|
15
make/common/templates/notary/signer-config.postgres.json
Normal file
15
make/common/templates/notary/signer-config.postgres.json
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"server": {
|
||||||
|
"grpc_addr": ":7899",
|
||||||
|
"tls_cert_file": "./notary-signer.crt",
|
||||||
|
"tls_key_file": "./notary-signer.key"
|
||||||
|
},
|
||||||
|
"logging": {
|
||||||
|
"level": "debug"
|
||||||
|
},
|
||||||
|
"storage": {
|
||||||
|
"backend": "postgres",
|
||||||
|
"db_url": "postgres://signer:password@postgresql:5432/notarysigner?sslmode=disable",
|
||||||
|
"default_alias":"defaultalias"
|
||||||
|
}
|
||||||
|
}
|
@ -1,2 +1,3 @@
|
|||||||
NOTARY_SIGNER_DEFAULTALIAS=$alias
|
NOTARY_SIGNER_DEFAULTALIAS=$alias
|
||||||
|
MIGRATIONS_PATH=migrations/signer/postgresql
|
||||||
|
DB_URL=postgres://signer:password@postgresql:5432/notarysigner?sslmode=disable
|
||||||
|
@ -11,26 +11,11 @@ services:
|
|||||||
registry:
|
registry:
|
||||||
networks:
|
networks:
|
||||||
- harbor-clair
|
- harbor-clair
|
||||||
postgres:
|
postgresql:
|
||||||
networks:
|
networks:
|
||||||
harbor-clair:
|
harbor-clair:
|
||||||
aliases:
|
aliases:
|
||||||
- postgres
|
- harbor-db
|
||||||
container_name: clair-db
|
|
||||||
image: vmware/postgresql-photon:__postgresql_version__
|
|
||||||
restart: always
|
|
||||||
depends_on:
|
|
||||||
- log
|
|
||||||
env_file:
|
|
||||||
./common/config/clair/postgres_env
|
|
||||||
volumes:
|
|
||||||
- ./common/config/clair/postgresql-init.d/:/docker-entrypoint-initdb.d:z
|
|
||||||
- /data/clair-db:/var/lib/postgresql/data:z
|
|
||||||
logging:
|
|
||||||
driver: "syslog"
|
|
||||||
options:
|
|
||||||
syslog-address: "tcp://127.0.0.1:1514"
|
|
||||||
tag: "clair-db"
|
|
||||||
clair:
|
clair:
|
||||||
networks:
|
networks:
|
||||||
- harbor-clair
|
- harbor-clair
|
||||||
@ -39,7 +24,7 @@ services:
|
|||||||
restart: always
|
restart: always
|
||||||
cpu_quota: 150000
|
cpu_quota: 150000
|
||||||
depends_on:
|
depends_on:
|
||||||
- postgres
|
- postgresql
|
||||||
volumes:
|
volumes:
|
||||||
- ./common/config/clair/config.yaml:/etc/clair/config.yaml:z
|
- ./common/config/clair/config.yaml:/etc/clair/config.yaml:z
|
||||||
logging:
|
logging:
|
||||||
|
@ -6,18 +6,24 @@ services:
|
|||||||
proxy:
|
proxy:
|
||||||
networks:
|
networks:
|
||||||
- harbor-notary
|
- harbor-notary
|
||||||
|
postgresql:
|
||||||
|
networks:
|
||||||
|
harbor-notary:
|
||||||
|
aliases:
|
||||||
|
- harbor-db
|
||||||
notary-server:
|
notary-server:
|
||||||
image: vmware/notary-server-photon:__notary_version__
|
image: vmware/notary-server-photon:__notary_version__
|
||||||
container_name: notary-server
|
container_name: notary-server
|
||||||
restart: always
|
restart: always
|
||||||
networks:
|
networks:
|
||||||
- notary-mdb
|
|
||||||
- notary-sig
|
- notary-sig
|
||||||
- harbor-notary
|
- harbor-notary
|
||||||
volumes:
|
volumes:
|
||||||
- ./common/config/notary:/etc/notary:z
|
- ./common/config/notary:/etc/notary:z
|
||||||
|
env_file:
|
||||||
|
- ./common/config/notary/server_env
|
||||||
depends_on:
|
depends_on:
|
||||||
- notary-db
|
- postgresql
|
||||||
- notary-signer
|
- notary-signer
|
||||||
logging:
|
logging:
|
||||||
driver: "syslog"
|
driver: "syslog"
|
||||||
@ -29,7 +35,7 @@ services:
|
|||||||
container_name: notary-signer
|
container_name: notary-signer
|
||||||
restart: always
|
restart: always
|
||||||
networks:
|
networks:
|
||||||
notary-mdb:
|
harbor-notary:
|
||||||
notary-sig:
|
notary-sig:
|
||||||
aliases:
|
aliases:
|
||||||
- notarysigner
|
- notarysigner
|
||||||
@ -38,38 +44,14 @@ services:
|
|||||||
env_file:
|
env_file:
|
||||||
- ./common/config/notary/signer_env
|
- ./common/config/notary/signer_env
|
||||||
depends_on:
|
depends_on:
|
||||||
- notary-db
|
- postgresql
|
||||||
logging:
|
logging:
|
||||||
driver: "syslog"
|
driver: "syslog"
|
||||||
options:
|
options:
|
||||||
syslog-address: "tcp://127.0.0.1:1514"
|
syslog-address: "tcp://127.0.0.1:1514"
|
||||||
tag: "notary-signer"
|
tag: "notary-signer"
|
||||||
notary-db:
|
|
||||||
image: vmware/mariadb-photon:__mariadb_version__
|
|
||||||
container_name: notary-db
|
|
||||||
restart: always
|
|
||||||
networks:
|
|
||||||
notary-mdb:
|
|
||||||
aliases:
|
|
||||||
- mysql
|
|
||||||
volumes:
|
|
||||||
- ./common/config/notary/mysql-initdb.d:/docker-entrypoint-initdb.d:z
|
|
||||||
- /data/notary-db:/var/lib/mysql:z
|
|
||||||
environment:
|
|
||||||
- TERM=dumb
|
|
||||||
- MYSQL_ALLOW_EMPTY_PASSWORD="true"
|
|
||||||
command: mysqld --innodb_file_per_table
|
|
||||||
depends_on:
|
|
||||||
- log
|
|
||||||
logging:
|
|
||||||
driver: "syslog"
|
|
||||||
options:
|
|
||||||
syslog-address: "tcp://127.0.0.1:1514"
|
|
||||||
tag: "notary-db"
|
|
||||||
networks:
|
networks:
|
||||||
harbor-notary:
|
harbor-notary:
|
||||||
external: false
|
external: false
|
||||||
notary-mdb:
|
|
||||||
external: false
|
|
||||||
notary-sig:
|
notary-sig:
|
||||||
external: false
|
external: false
|
@ -31,12 +31,12 @@ services:
|
|||||||
options:
|
options:
|
||||||
syslog-address: "tcp://127.0.0.1:1514"
|
syslog-address: "tcp://127.0.0.1:1514"
|
||||||
tag: "registry"
|
tag: "registry"
|
||||||
mysql:
|
postgresql:
|
||||||
image: vmware/harbor-db:__version__
|
image: vmware/harbor-db:__version__
|
||||||
container_name: harbor-db
|
container_name: harbor-db
|
||||||
restart: always
|
restart: always
|
||||||
volumes:
|
volumes:
|
||||||
- /data/database:/var/lib/mysql:z
|
- /data/database:/var/lib/postgresql/data:z
|
||||||
networks:
|
networks:
|
||||||
- harbor
|
- harbor
|
||||||
env_file:
|
env_file:
|
||||||
@ -47,7 +47,7 @@ services:
|
|||||||
driver: "syslog"
|
driver: "syslog"
|
||||||
options:
|
options:
|
||||||
syslog-address: "tcp://127.0.0.1:1514"
|
syslog-address: "tcp://127.0.0.1:1514"
|
||||||
tag: "mysql"
|
tag: "postgresql"
|
||||||
adminserver:
|
adminserver:
|
||||||
image: vmware/harbor-adminserver:__version__
|
image: vmware/harbor-adminserver:__version__
|
||||||
container_name: harbor-adminserver
|
container_name: harbor-adminserver
|
||||||
@ -119,6 +119,13 @@ services:
|
|||||||
- /data/redis:/data
|
- /data/redis:/data
|
||||||
networks:
|
networks:
|
||||||
- harbor
|
- harbor
|
||||||
|
depends_on:
|
||||||
|
- log
|
||||||
|
logging:
|
||||||
|
driver: "syslog"
|
||||||
|
options:
|
||||||
|
syslog-address: "tcp://127.0.0.1:1514"
|
||||||
|
tag: "redis"
|
||||||
proxy:
|
proxy:
|
||||||
image: vmware/nginx-photon:__nginx_version__
|
image: vmware/nginx-photon:__nginx_version__
|
||||||
container_name: nginx
|
container_name: nginx
|
||||||
@ -132,7 +139,7 @@ services:
|
|||||||
- 443:443
|
- 443:443
|
||||||
- 4443:4443
|
- 4443:4443
|
||||||
depends_on:
|
depends_on:
|
||||||
- mysql
|
- postgresql
|
||||||
- registry
|
- registry
|
||||||
- ui
|
- ui
|
||||||
- log
|
- log
|
||||||
|
@ -127,16 +127,16 @@ project_creation_restriction = everyone
|
|||||||
#######Harbor DB configuration section#######
|
#######Harbor DB configuration section#######
|
||||||
|
|
||||||
#The address of the Harbor database. Only need to change when using external db.
|
#The address of the Harbor database. Only need to change when using external db.
|
||||||
db_host = mysql
|
db_host = postgresql
|
||||||
|
|
||||||
#The password for the root user of Harbor DB. Change this before any production use.
|
#The password for the root user of Harbor DB. Change this before any production use.
|
||||||
db_password = root123
|
db_password = root123
|
||||||
|
|
||||||
#The port of Harbor database host
|
#The port of Harbor database host
|
||||||
db_port = 3306
|
db_port = 5432
|
||||||
|
|
||||||
#The user name of Harbor database
|
#The user name of Harbor database
|
||||||
db_user = root
|
db_user = postgres
|
||||||
|
|
||||||
##### End of Harbor DB configuration#######
|
##### End of Harbor DB configuration#######
|
||||||
|
|
||||||
@ -147,11 +147,11 @@ redis_url = redis:6379
|
|||||||
##########Clair DB configuration############
|
##########Clair DB configuration############
|
||||||
|
|
||||||
#Clair DB host address. Only change it when using an exteral DB.
|
#Clair DB host address. Only change it when using an exteral DB.
|
||||||
clair_db_host = postgres
|
clair_db_host = postgresql
|
||||||
|
|
||||||
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
|
#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair.
|
||||||
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
|
#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database.
|
||||||
clair_db_password = password
|
clair_db_password = root123
|
||||||
|
|
||||||
#Clair DB connect port
|
#Clair DB connect port
|
||||||
clair_db_port = 5432
|
clair_db_port = 5432
|
||||||
|
@ -50,7 +50,7 @@ DOCKERFILEPATH_LOG=$(DOCKERFILEPATH)/log
|
|||||||
DOCKERFILENAME_LOG=Dockerfile
|
DOCKERFILENAME_LOG=Dockerfile
|
||||||
DOCKERIMAGENAME_LOG=vmware/harbor-log
|
DOCKERIMAGENAME_LOG=vmware/harbor-log
|
||||||
|
|
||||||
DOCKERFILEPATH_DB=$(DOCKERFILEPATH)/db
|
DOCKERFILEPATH_DB=$(DOCKERFILEPATH)/db/postgresql
|
||||||
DOCKERFILENAME_DB=Dockerfile
|
DOCKERFILENAME_DB=Dockerfile
|
||||||
DOCKERIMAGENAME_DB=vmware/harbor-db
|
DOCKERIMAGENAME_DB=vmware/harbor-db
|
||||||
|
|
||||||
@ -84,13 +84,13 @@ DOCKERFILEPATH_REDIS=$(DOCKERFILEPATH)/redis
|
|||||||
DOCKERFILENAME_REDIS=Dockerfile
|
DOCKERFILENAME_REDIS=Dockerfile
|
||||||
DOCKERIMAGENAME_REDIS=vmware/redis-photon
|
DOCKERIMAGENAME_REDIS=vmware/redis-photon
|
||||||
|
|
||||||
_build_db: _build_mariadb
|
_build_db:
|
||||||
@echo "modify the db dockerfile..."
|
@echo "modify the db dockerfile..."
|
||||||
@$(SEDCMD) -i 's/__version__/$(MARIADBVERSION)/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
|
@$(SEDCMD) -i 's/__postgresql_version__/$(VERSIONTAG)/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
|
||||||
@echo "building db container for photon..."
|
@echo "building db container for photon..."
|
||||||
@cd $(DOCKERFILEPATH_DB) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB) -t $(DOCKERIMAGENAME_DB):$(VERSIONTAG) .
|
@cd $(DOCKERFILEPATH_DB) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB) -t $(DOCKERIMAGENAME_DB):$(VERSIONTAG) .
|
||||||
@echo "Done."
|
@echo "Done."
|
||||||
@$(SEDCMD) -i 's/$(MARIADBVERSION)/__version__/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
|
@$(SEDCMD) -i 's/$(VERSIONTAG)/__postgresql_version__/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)
|
||||||
|
|
||||||
_build_adminiserver:
|
_build_adminiserver:
|
||||||
@echo "building adminserver container for photon..."
|
@echo "building adminserver container for photon..."
|
||||||
@ -142,7 +142,7 @@ _build_notary:
|
|||||||
rm -rf $(DOCKERFILEPATH_NOTARY)/binary && mkdir -p $(DOCKERFILEPATH_NOTARY)/binary && \
|
rm -rf $(DOCKERFILEPATH_NOTARY)/binary && mkdir -p $(DOCKERFILEPATH_NOTARY)/binary && \
|
||||||
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-signer, $(DOCKERFILEPATH_NOTARY)/binary/notary-signer) && \
|
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-signer, $(DOCKERFILEPATH_NOTARY)/binary/notary-signer) && \
|
||||||
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-server, $(DOCKERFILEPATH_NOTARY)/binary/notary-server) && \
|
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-server, $(DOCKERFILEPATH_NOTARY)/binary/notary-server) && \
|
||||||
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-migrate.tgz, $(DOCKERFILEPATH_NOTARY)/binary/notary-migrate.tgz); \
|
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/notary-migrate-postgresql.tgz, $(DOCKERFILEPATH_NOTARY)/binary/notary-migrate.tgz); \
|
||||||
cd $(DOCKERFILEPATH_NOTARY)/binary && tar -zvxf notary-migrate.tgz; \
|
cd $(DOCKERFILEPATH_NOTARY)/binary && tar -zvxf notary-migrate.tgz; \
|
||||||
else \
|
else \
|
||||||
cd $(DOCKERFILEPATH_NOTARY) && $(DOCKERFILEPATH_NOTARY)/builder_public $(NOTARYVERSION); \
|
cd $(DOCKERFILEPATH_NOTARY) && $(DOCKERFILEPATH_NOTARY)/builder_public $(NOTARYVERSION); \
|
||||||
|
6
make/photon/db/postgresql/Dockerfile
Normal file
6
make/photon/db/postgresql/Dockerfile
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
FROM vmware/postgresql-photon:__postgresql_version__
|
||||||
|
|
||||||
|
COPY registry.sql /docker-entrypoint-initdb.d/
|
||||||
|
|
||||||
|
COPY initial-notaryserver.sql /docker-entrypoint-initdb.d/
|
||||||
|
COPY initial-notarysigner.sql /docker-entrypoint-initdb.d/
|
21
make/photon/db/postgresql/healthcheck.sh
Normal file
21
make/photon/db/postgresql/healthcheck.sh
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -eo pipefail
|
||||||
|
|
||||||
|
host="$(hostname -i || echo '127.0.0.1')"
|
||||||
|
user="${POSTGRES_USER:-postgres}"
|
||||||
|
db="${POSTGRES_DB:-$POSTGRES_USER}"
|
||||||
|
export PGPASSWORD="${POSTGRES_PASSWORD:-}"
|
||||||
|
|
||||||
|
args=(
|
||||||
|
# force postgres to not use the local unix socket (test "external" connectibility)
|
||||||
|
--host "$host"
|
||||||
|
--username "$user"
|
||||||
|
--dbname "$db"
|
||||||
|
--quiet --no-align --tuples-only
|
||||||
|
)
|
||||||
|
|
||||||
|
if select="$(echo 'SELECT 1' | psql "${args[@]}")" && [ "$select" = '1' ]; then
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
exit 1
|
4
make/photon/db/postgresql/initial-notaryserver.sql
Normal file
4
make/photon/db/postgresql/initial-notaryserver.sql
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
CREATE DATABASE notaryserver;
|
||||||
|
CREATE USER server;
|
||||||
|
alter user server with encrypted password 'password';
|
||||||
|
GRANT ALL PRIVILEGES ON DATABASE notaryserver TO server;
|
4
make/photon/db/postgresql/initial-notarysigner.sql
Normal file
4
make/photon/db/postgresql/initial-notarysigner.sql
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
CREATE DATABASE notarysigner;
|
||||||
|
CREATE USER signer;
|
||||||
|
alter user signer with encrypted password 'password';
|
||||||
|
GRANT ALL PRIVILEGES ON DATABASE notarysigner TO signer;
|
348
make/photon/db/postgresql/registry.sql
Normal file
348
make/photon/db/postgresql/registry.sql
Normal file
@ -0,0 +1,348 @@
|
|||||||
|
CREATE DATABASE registry ENCODING 'UTF8';
|
||||||
|
|
||||||
|
\c registry;
|
||||||
|
|
||||||
|
create table access (
|
||||||
|
access_id SERIAL PRIMARY KEY NOT NULL,
|
||||||
|
access_code char(1),
|
||||||
|
comment varchar (30)
|
||||||
|
);
|
||||||
|
|
||||||
|
insert into access (access_code, comment) values
|
||||||
|
('M', 'Management access for project'),
|
||||||
|
('R', 'Read access for project'),
|
||||||
|
('W', 'Write access for project'),
|
||||||
|
('D', 'Delete access for project'),
|
||||||
|
('S', 'Search access for project');
|
||||||
|
|
||||||
|
create table role (
|
||||||
|
role_id SERIAL PRIMARY KEY NOT NULL,
|
||||||
|
role_mask int DEFAULT 0 NOT NULL,
|
||||||
|
role_code varchar(20),
|
||||||
|
name varchar (20)
|
||||||
|
);
|
||||||
|
|
||||||
|
/*
|
||||||
|
role mask is used for future enhancement when a project member can have multi-roles
|
||||||
|
currently set to 0
|
||||||
|
*/
|
||||||
|
|
||||||
|
insert into role (role_code, name) values
|
||||||
|
('MDRWS', 'projectAdmin'),
|
||||||
|
('RWS', 'developer'),
|
||||||
|
('RS', 'guest');
|
||||||
|
|
||||||
|
create table harbor_user (
|
||||||
|
user_id SERIAL PRIMARY KEY NOT NULL,
|
||||||
|
username varchar(255),
|
||||||
|
email varchar(255),
|
||||||
|
password varchar(40) NOT NULL,
|
||||||
|
realname varchar (255) NOT NULL,
|
||||||
|
comment varchar (30),
|
||||||
|
deleted boolean DEFAULT false NOT NULL,
|
||||||
|
reset_uuid varchar(40) DEFAULT NULL,
|
||||||
|
salt varchar(40) DEFAULT NULL,
|
||||||
|
sysadmin_flag boolean DEFAULT false NOT NULL,
|
||||||
|
creation_time timestamp(0),
|
||||||
|
update_time timestamp(0),
|
||||||
|
UNIQUE (username),
|
||||||
|
UNIQUE (email)
|
||||||
|
);
|
||||||
|
|
||||||
|
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
|
||||||
|
('admin', 'admin@example.com', '', 'system admin', 'admin user',false, true, NOW(), NOW()),
|
||||||
|
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', true, false, NOW(), NOW());
|
||||||
|
|
||||||
|
create table project (
|
||||||
|
project_id SERIAL PRIMARY KEY NOT NULL,
|
||||||
|
owner_id int NOT NULL,
|
||||||
|
/*
|
||||||
|
The max length of name controlled by API is 30,
|
||||||
|
and 11 is reserved for marking the deleted project.
|
||||||
|
*/
|
||||||
|
name varchar (255) NOT NULL,
|
||||||
|
creation_time timestamp,
|
||||||
|
update_time timestamp,
|
||||||
|
deleted boolean DEFAULT false NOT NULL,
|
||||||
|
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
|
||||||
|
UNIQUE (name)
|
||||||
|
);
|
||||||
|
|
||||||
|
insert into project (owner_id, name, creation_time, update_time) values
|
||||||
|
(1, 'library', NOW(), NOW());
|
||||||
|
|
||||||
|
create table project_member (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
project_id int NOT NULL,
|
||||||
|
entity_id int NOT NULL,
|
||||||
|
/*
|
||||||
|
entity_type indicates the type of member,
|
||||||
|
u for user, g for user group
|
||||||
|
*/
|
||||||
|
entity_type char(1) NOT NULL,
|
||||||
|
role int NOT NULL,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY (id),
|
||||||
|
CONSTRAINT unique_project_entity_type UNIQUE (project_id, entity_id, entity_type)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE FUNCTION update_update_time_at_column() RETURNS trigger
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS $$
|
||||||
|
BEGIN
|
||||||
|
NEW.update_time = NOW();
|
||||||
|
RETURN NEW;
|
||||||
|
END;
|
||||||
|
$$;
|
||||||
|
|
||||||
|
CREATE TRIGGER project_member_update_time_at_modtime BEFORE UPDATE ON project_member FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
insert into project_member (project_id, entity_id, role, entity_type) values
|
||||||
|
(1, 1, 1, 'u');
|
||||||
|
|
||||||
|
create table project_metadata (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
project_id int NOT NULL,
|
||||||
|
name varchar(255) NOT NULL,
|
||||||
|
value varchar(255),
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
deleted boolean DEFAULT false NOT NULL,
|
||||||
|
PRIMARY KEY (id),
|
||||||
|
CONSTRAINT unique_project_id_and_name UNIQUE (project_id,name),
|
||||||
|
FOREIGN KEY (project_id) REFERENCES project(project_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER project_metadata_update_time_at_modtime BEFORE UPDATE ON project_metadata FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
insert into project_metadata (project_id, name, value, creation_time, update_time, deleted) values
|
||||||
|
(1, 'public', 'true', NOW(), NOW(), false);
|
||||||
|
|
||||||
|
create table user_group (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
group_name varchar(255) NOT NULL,
|
||||||
|
group_type smallint default 0,
|
||||||
|
ldap_group_dn varchar(512) NOT NULL,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY (id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER user_group_update_time_at_modtime BEFORE UPDATE ON user_group FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table access_log (
|
||||||
|
log_id SERIAL NOT NULL,
|
||||||
|
username varchar (255) NOT NULL,
|
||||||
|
project_id int NOT NULL,
|
||||||
|
repo_name varchar (256),
|
||||||
|
repo_tag varchar (128),
|
||||||
|
GUID varchar(64),
|
||||||
|
operation varchar(20) NOT NULL,
|
||||||
|
op_time timestamp,
|
||||||
|
primary key (log_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX pid_optime ON access_log (project_id, op_time);
|
||||||
|
|
||||||
|
create table repository (
|
||||||
|
repository_id SERIAL NOT NULL,
|
||||||
|
name varchar(255) NOT NULL,
|
||||||
|
project_id int NOT NULL,
|
||||||
|
description text,
|
||||||
|
pull_count int DEFAULT 0 NOT NULL,
|
||||||
|
star_count int DEFAULT 0 NOT NULL,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
primary key (repository_id),
|
||||||
|
UNIQUE (name)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER repository_update_time_at_modtime BEFORE UPDATE ON repository FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table replication_policy (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
name varchar(256),
|
||||||
|
project_id int NOT NULL,
|
||||||
|
target_id int NOT NULL,
|
||||||
|
enabled boolean NOT NULL DEFAULT true,
|
||||||
|
description text,
|
||||||
|
deleted boolean DEFAULT false NOT NULL,
|
||||||
|
cron_str varchar(256),
|
||||||
|
filters varchar(1024),
|
||||||
|
replicate_deletion boolean DEFAULT false NOT NULL,
|
||||||
|
start_time timestamp NULL,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY (id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER replication_policy_update_time_at_modtime BEFORE UPDATE ON replication_policy FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table replication_target (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
name varchar(64),
|
||||||
|
url varchar(64),
|
||||||
|
username varchar(255),
|
||||||
|
password varchar(128),
|
||||||
|
/*
|
||||||
|
target_type indicates the type of target registry,
|
||||||
|
0 means it's a harbor instance,
|
||||||
|
1 means it's a regulart registry
|
||||||
|
*/
|
||||||
|
target_type SMALLINT NOT NULL DEFAULT 0,
|
||||||
|
insecure boolean NOT NULL DEFAULT false,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY (id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER replication_target_update_time_at_modtime BEFORE UPDATE ON replication_target FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table replication_job (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
status varchar(64) NOT NULL,
|
||||||
|
policy_id int NOT NULL,
|
||||||
|
repository varchar(256) NOT NULL,
|
||||||
|
operation varchar(64) NOT NULL,
|
||||||
|
tags varchar(16384),
|
||||||
|
/*
|
||||||
|
New job service only records uuid, for compatibility in this table both IDs are stored.
|
||||||
|
*/
|
||||||
|
job_uuid varchar(64),
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY (id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX policy ON replication_job (policy_id);
|
||||||
|
CREATE INDEX poid_uptime ON replication_job (policy_id, update_time);
|
||||||
|
CREATE INDEX poid_status ON replication_job (policy_id, status);
|
||||||
|
|
||||||
|
CREATE TRIGGER replication_job_update_time_at_modtime BEFORE UPDATE ON replication_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table replication_immediate_trigger (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
policy_id int NOT NULL,
|
||||||
|
namespace varchar(256) NOT NULL,
|
||||||
|
on_push boolean NOT NULL DEFAULT false,
|
||||||
|
on_deletion boolean NOT NULL DEFAULT false,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY (id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER replication_immediate_trigger_update_time_at_modtime BEFORE UPDATE ON replication_immediate_trigger FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table img_scan_job (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
status varchar(64) NOT NULL,
|
||||||
|
repository varchar(256) NOT NULL,
|
||||||
|
tag varchar(128) NOT NULL,
|
||||||
|
digest varchar(128),
|
||||||
|
/*
|
||||||
|
New job service only records uuid, for compatibility in this table both IDs are stored.
|
||||||
|
*/
|
||||||
|
job_uuid varchar(64),
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY (id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_status ON img_scan_job (status);
|
||||||
|
CREATE INDEX idx_digest ON img_scan_job (digest);
|
||||||
|
CREATE INDEX idx_uuid ON img_scan_job (job_uuid);
|
||||||
|
CREATE INDEX idx_repository_tag ON img_scan_job (repository,tag);
|
||||||
|
|
||||||
|
CREATE TRIGGER img_scan_job_update_time_at_modtime BEFORE UPDATE ON img_scan_job FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table img_scan_overview (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
image_digest varchar(128) NOT NULL,
|
||||||
|
scan_job_id int NOT NULL,
|
||||||
|
/* 0 indicates none, the higher the number, the more severe the status */
|
||||||
|
severity int NOT NULL default 0,
|
||||||
|
/* the json string to store components severity status, currently use a json to be more flexible and avoid creating additional tables. */
|
||||||
|
components_overview varchar(2048),
|
||||||
|
/* primary key for querying details, in clair it should be the name of the "top layer" */
|
||||||
|
details_key varchar(128),
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY(id),
|
||||||
|
UNIQUE(image_digest)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER img_scan_overview_update_time_at_modtime BEFORE UPDATE ON img_scan_overview FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table clair_vuln_timestamp (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
namespace varchar(128) NOT NULL,
|
||||||
|
last_update timestamp NOT NULL,
|
||||||
|
PRIMARY KEY(id),
|
||||||
|
UNIQUE(namespace)
|
||||||
|
);
|
||||||
|
|
||||||
|
create table properties (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
k varchar(64) NOT NULL,
|
||||||
|
v varchar(128) NOT NULL,
|
||||||
|
PRIMARY KEY(id),
|
||||||
|
UNIQUE (k)
|
||||||
|
);
|
||||||
|
|
||||||
|
create table harbor_label (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
name varchar(128) NOT NULL,
|
||||||
|
description text,
|
||||||
|
color varchar(16),
|
||||||
|
/*
|
||||||
|
's' for system level labels
|
||||||
|
'u' for user level labels
|
||||||
|
*/
|
||||||
|
level char(1) NOT NULL,
|
||||||
|
/*
|
||||||
|
'g' for global labels
|
||||||
|
'p' for project labels
|
||||||
|
*/
|
||||||
|
scope char(1) NOT NULL,
|
||||||
|
project_id int,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY(id),
|
||||||
|
CONSTRAINT unique_label UNIQUE (name,scope, project_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER harbor_label_update_time_at_modtime BEFORE UPDATE ON harbor_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
create table harbor_resource_label (
|
||||||
|
id SERIAL NOT NULL,
|
||||||
|
label_id int NOT NULL,
|
||||||
|
/*
|
||||||
|
the resource_id is the ID of project when the resource_type is p
|
||||||
|
the resource_id is the ID of repository when the resource_type is r
|
||||||
|
*/
|
||||||
|
resource_id int,
|
||||||
|
/*
|
||||||
|
the resource_name is the name of image when the resource_type is i
|
||||||
|
*/
|
||||||
|
resource_name varchar(256),
|
||||||
|
/*
|
||||||
|
'p' for project
|
||||||
|
'r' for repository
|
||||||
|
'i' for image
|
||||||
|
*/
|
||||||
|
resource_type char(1) NOT NULL,
|
||||||
|
creation_time timestamp default 'now'::timestamp,
|
||||||
|
update_time timestamp default 'now'::timestamp,
|
||||||
|
PRIMARY KEY(id),
|
||||||
|
CONSTRAINT unique_label_resource UNIQUE (label_id,resource_id, resource_name, resource_type)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TRIGGER harbor_resource_label_update_time_at_modtime BEFORE UPDATE ON harbor_resource_label FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS alembic_version (
|
||||||
|
version_num varchar(32) NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
insert into alembic_version values ('1.5.0');
|
||||||
|
|
@ -29,7 +29,7 @@ insert into role (role_code, name) values
|
|||||||
('RS', 'guest');
|
('RS', 'guest');
|
||||||
|
|
||||||
|
|
||||||
create table user (
|
create table harbor_user (
|
||||||
user_id INTEGER PRIMARY KEY,
|
user_id INTEGER PRIMARY KEY,
|
||||||
/*
|
/*
|
||||||
The max length of username controlled by API is 20,
|
The max length of username controlled by API is 20,
|
||||||
@ -56,7 +56,7 @@ create table user (
|
|||||||
UNIQUE (email)
|
UNIQUE (email)
|
||||||
);
|
);
|
||||||
|
|
||||||
insert into user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
|
insert into harbor_user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
|
||||||
('admin', 'admin@example.com', '', 'system admin', 'admin user',0, 1, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP),
|
('admin', 'admin@example.com', '', 'system admin', 'admin user',0, 1, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP),
|
||||||
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, 0, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
|
('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, 0, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
|
||||||
|
|
||||||
@ -80,7 +80,7 @@ create table project (
|
|||||||
creation_time timestamp,
|
creation_time timestamp,
|
||||||
update_time timestamp,
|
update_time timestamp,
|
||||||
deleted tinyint (1) DEFAULT 0 NOT NULL,
|
deleted tinyint (1) DEFAULT 0 NOT NULL,
|
||||||
FOREIGN KEY (owner_id) REFERENCES user(user_id),
|
FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id),
|
||||||
UNIQUE (name)
|
UNIQUE (name)
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -1,2 +1,2 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
sudo -E -u \#10000 sh -c "/usr/bin/env /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.json -logf=logfmt"
|
sudo -E -u \#10000 sh -c "/usr/bin/env /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
FROM vmware/photon:1.0
|
FROM vmware/photon:1.0
|
||||||
|
|
||||||
RUN tdnf distro-sync -y \
|
RUN tdnf distro-sync -y || echo \
|
||||||
&& tdnf erase vim -y \
|
&& tdnf erase vim -y \
|
||||||
&& tdnf install -y shadow sudo \
|
&& tdnf install -y shadow sudo \
|
||||||
&& tdnf clean all \
|
&& tdnf clean all \
|
||||||
|
@ -1,2 +1,2 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
sudo -E -u \#10000 sh -c "/usr/bin/env && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.json -logf=logfmt"
|
sudo -E -u \#10000 sh -c "/usr/bin/env && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
FROM vmware/photon:1.0
|
FROM vmware/photon:1.0
|
||||||
|
|
||||||
RUN tdnf distro-sync -y \
|
RUN tdnf distro-sync -y || echo \
|
||||||
&& tdnf erase vim -y \
|
&& tdnf erase vim -y \
|
||||||
&& tdnf install -y shadow sudo \
|
&& tdnf install -y shadow sudo \
|
||||||
&& tdnf clean all \
|
&& tdnf clean all \
|
||||||
|
@ -168,7 +168,7 @@ loglevel notice
|
|||||||
# Specify the log file name. Also the empty string can be used to force
|
# Specify the log file name. Also the empty string can be used to force
|
||||||
# Redis to log on the standard output. Note that if you use standard
|
# Redis to log on the standard output. Note that if you use standard
|
||||||
# output for logging but daemonize, logs will be sent to /dev/null
|
# output for logging but daemonize, logs will be sent to /dev/null
|
||||||
logfile "/var/log/redis/redis.log"
|
logfile ""
|
||||||
|
|
||||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||||
# and optionally update the other syslog parameters to suit your needs.
|
# and optionally update the other syslog parameters to suit your needs.
|
||||||
|
17
make/prepare
17
make/prepare
@ -516,9 +516,9 @@ if args.notary_mode:
|
|||||||
notary_config_dir = prep_conf_dir(config_dir, "notary")
|
notary_config_dir = prep_conf_dir(config_dir, "notary")
|
||||||
notary_temp_dir = os.path.join(templates_dir, "notary")
|
notary_temp_dir = os.path.join(templates_dir, "notary")
|
||||||
print("Copying sql file for notary DB")
|
print("Copying sql file for notary DB")
|
||||||
if os.path.exists(os.path.join(notary_config_dir, "mysql-initdb.d")):
|
# if os.path.exists(os.path.join(notary_config_dir, "postgresql-initdb.d")):
|
||||||
shutil.rmtree(os.path.join(notary_config_dir, "mysql-initdb.d"))
|
# shutil.rmtree(os.path.join(notary_config_dir, "postgresql-initdb.d"))
|
||||||
shutil.copytree(os.path.join(notary_temp_dir, "mysql-initdb.d"), os.path.join(notary_config_dir, "mysql-initdb.d"))
|
# shutil.copytree(os.path.join(notary_temp_dir, "postgresql-initdb.d"), os.path.join(notary_config_dir, "postgresql-initdb.d"))
|
||||||
if customize_crt == 'on' and openssl_installed():
|
if customize_crt == 'on' and openssl_installed():
|
||||||
try:
|
try:
|
||||||
temp_cert_dir = os.path.join(base_dir, "cert_tmp")
|
temp_cert_dir = os.path.join(base_dir, "cert_tmp")
|
||||||
@ -553,11 +553,10 @@ if args.notary_mode:
|
|||||||
mark_file(os.path.join(notary_config_dir, "notary-signer-ca.crt"))
|
mark_file(os.path.join(notary_config_dir, "notary-signer-ca.crt"))
|
||||||
mark_file(os.path.join(notary_config_dir, "root.crt"))
|
mark_file(os.path.join(notary_config_dir, "root.crt"))
|
||||||
print("Copying notary signer configuration file")
|
print("Copying notary signer configuration file")
|
||||||
shutil.copy2(os.path.join(notary_temp_dir, "signer-config.json"), notary_config_dir)
|
shutil.copy2(os.path.join(notary_temp_dir, "signer-config.postgres.json"), notary_config_dir)
|
||||||
render(os.path.join(notary_temp_dir, "server-config.json"),
|
render(os.path.join(notary_temp_dir, "server-config.postgres.json"),
|
||||||
os.path.join(notary_config_dir, "server-config.json"),
|
os.path.join(notary_config_dir, "server-config.postgres.json"),
|
||||||
token_endpoint=public_url)
|
token_endpoint=public_url)
|
||||||
|
|
||||||
print("Copying nginx configuration file for notary")
|
print("Copying nginx configuration file for notary")
|
||||||
shutil.copy2(os.path.join(templates_dir, "nginx", "notary.upstream.conf"), nginx_conf_d)
|
shutil.copy2(os.path.join(templates_dir, "nginx", "notary.upstream.conf"), nginx_conf_d)
|
||||||
render(os.path.join(templates_dir, "nginx", "notary.server.conf"),
|
render(os.path.join(templates_dir, "nginx", "notary.server.conf"),
|
||||||
@ -567,6 +566,7 @@ if args.notary_mode:
|
|||||||
|
|
||||||
default_alias = get_alias(secretkey_path)
|
default_alias = get_alias(secretkey_path)
|
||||||
render(os.path.join(notary_temp_dir, "signer_env"), os.path.join(notary_config_dir, "signer_env"), alias = default_alias)
|
render(os.path.join(notary_temp_dir, "signer_env"), os.path.join(notary_config_dir, "signer_env"), alias = default_alias)
|
||||||
|
shutil.copy2(os.path.join(notary_temp_dir, "server_env"), notary_config_dir)
|
||||||
|
|
||||||
if args.clair_mode:
|
if args.clair_mode:
|
||||||
clair_temp_dir = os.path.join(templates_dir, "clair")
|
clair_temp_dir = os.path.join(templates_dir, "clair")
|
||||||
@ -600,5 +600,4 @@ if args.ha_mode:
|
|||||||
prepare_ha(rcp, args)
|
prepare_ha(rcp, args)
|
||||||
|
|
||||||
FNULL.close()
|
FNULL.close()
|
||||||
print("The configuration files are ready, please use docker-compose to start the service.")
|
print("The configuration files are ready, please use docker-compose to start the service.")
|
||||||
|
|
@ -35,10 +35,10 @@ var (
|
|||||||
common.LDAPScope: true,
|
common.LDAPScope: true,
|
||||||
common.LDAPTimeout: true,
|
common.LDAPTimeout: true,
|
||||||
common.TokenExpiration: true,
|
common.TokenExpiration: true,
|
||||||
common.MySQLPort: true,
|
|
||||||
common.MaxJobWorkers: true,
|
common.MaxJobWorkers: true,
|
||||||
common.CfgExpiration: true,
|
common.CfgExpiration: true,
|
||||||
common.ClairDBPort: true,
|
common.ClairDBPort: true,
|
||||||
|
common.PostGreSQLPort: true,
|
||||||
}
|
}
|
||||||
boolKeys = map[string]bool{
|
boolKeys = map[string]bool{
|
||||||
common.WithClair: true,
|
common.WithClair: true,
|
||||||
|
@ -2,13 +2,14 @@ package database
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/vmware/harbor/src/common/models"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/vmware/harbor/src/common"
|
"github.com/vmware/harbor/src/common"
|
||||||
|
"github.com/vmware/harbor/src/common/models"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCfgStore_Name(t *testing.T) {
|
func TestCfgStore_Name(t *testing.T) {
|
||||||
driver,err := NewCfgStore()
|
driver, err := NewCfgStore()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create db configuration store %v", err)
|
t.Fatalf("Failed to create db configuration store %v", err)
|
||||||
}
|
}
|
||||||
@ -16,30 +17,30 @@ func TestCfgStore_Name(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestWrapperConfig(t *testing.T) {
|
func TestWrapperConfig(t *testing.T) {
|
||||||
cfg:=[]*models.ConfigEntry{
|
cfg := []*models.ConfigEntry{
|
||||||
{
|
{
|
||||||
Key:common.CfgExpiration,
|
Key: common.CfgExpiration,
|
||||||
Value:"500",
|
Value: "500",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Key:common.WithNotary,
|
Key: common.WithNotary,
|
||||||
Value:"true",
|
Value: "true",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Key:common.MySQLHost,
|
Key: common.PostGreSQLHOST,
|
||||||
Value:"192.168.1.210",
|
Value: "192.168.1.210",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
result,err := WrapperConfig(cfg)
|
result, err := WrapperConfig(cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to wrapper config %v", err)
|
t.Fatalf("Failed to wrapper config %v", err)
|
||||||
}
|
}
|
||||||
withNotary,_ := result[common.WithNotary].(bool)
|
withNotary, _ := result[common.WithNotary].(bool)
|
||||||
assert.Equal(t,true, withNotary)
|
assert.Equal(t, true, withNotary)
|
||||||
|
|
||||||
mysqlhost, ok := result[common.MySQLHost].(string)
|
postgresqlhost, ok := result[common.PostGreSQLHOST].(string)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
assert.Equal(t, "192.168.1.210", mysqlhost)
|
assert.Equal(t, "192.168.1.210", postgresqlhost)
|
||||||
|
|
||||||
expiration, ok := result[common.CfgExpiration].(float64)
|
expiration, ok := result[common.CfgExpiration].(float64)
|
||||||
|
|
||||||
@ -49,26 +50,26 @@ func TestWrapperConfig(t *testing.T) {
|
|||||||
|
|
||||||
func TestTranslateConfig(t *testing.T) {
|
func TestTranslateConfig(t *testing.T) {
|
||||||
config := map[string]interface{}{}
|
config := map[string]interface{}{}
|
||||||
config[common.MySQLHost]="192.168.1.210"
|
config[common.PostGreSQLHOST] = "192.168.1.210"
|
||||||
|
|
||||||
entries,err := TranslateConfig(config)
|
entries, err := TranslateConfig(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to translate configuration %v", err)
|
t.Fatalf("Failed to translate configuration %v", err)
|
||||||
}
|
}
|
||||||
assert.Equal(t, "192.168.1.210",entries[0].Value)
|
assert.Equal(t, "192.168.1.210", entries[0].Value)
|
||||||
config =make(map[string]interface{})
|
config = make(map[string]interface{})
|
||||||
config[common.WithNotary]=true
|
config[common.WithNotary] = true
|
||||||
entries,err = TranslateConfig(config)
|
entries, err = TranslateConfig(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to translate configuration %v", err)
|
t.Fatalf("Failed to translate configuration %v", err)
|
||||||
}
|
}
|
||||||
assert.Equal(t, "true", entries[0].Value)
|
assert.Equal(t, "true", entries[0].Value)
|
||||||
|
|
||||||
config =make(map[string]interface{})
|
config = make(map[string]interface{})
|
||||||
config[common.CfgExpiration]=float64(500)
|
config[common.CfgExpiration] = float64(500)
|
||||||
entries,err = TranslateConfig(config)
|
entries, err = TranslateConfig(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to translate configuration %v", err)
|
t.Fatalf("Failed to translate configuration %v", err)
|
||||||
}
|
}
|
||||||
assert.Equal(t, "500", entries[0].Value)
|
assert.Equal(t, "500", entries[0].Value)
|
||||||
}
|
}
|
||||||
|
@ -47,7 +47,7 @@ var (
|
|||||||
attrs = []string{
|
attrs = []string{
|
||||||
common.EmailPassword,
|
common.EmailPassword,
|
||||||
common.LDAPSearchPwd,
|
common.LDAPSearchPwd,
|
||||||
common.MySQLPassword,
|
common.PostGreSQLPassword,
|
||||||
common.AdminInitialPassword,
|
common.AdminInitialPassword,
|
||||||
common.ClairDBPassword,
|
common.ClairDBPassword,
|
||||||
common.UAAClientSecret,
|
common.UAAClientSecret,
|
||||||
@ -61,22 +61,22 @@ var (
|
|||||||
env: "SELF_REGISTRATION",
|
env: "SELF_REGISTRATION",
|
||||||
parse: parseStringToBool,
|
parse: parseStringToBool,
|
||||||
},
|
},
|
||||||
common.DatabaseType: "DATABASE_TYPE",
|
common.DatabaseType: "DATABASE_TYPE",
|
||||||
common.MySQLHost: "MYSQL_HOST",
|
common.PostGreSQLHOST: "POSTGRESQL_HOST",
|
||||||
common.MySQLPort: &parser{
|
common.PostGreSQLPort: &parser{
|
||||||
env: "MYSQL_PORT",
|
env: "POSTGRESQL_PORT",
|
||||||
parse: parseStringToInt,
|
parse: parseStringToInt,
|
||||||
},
|
},
|
||||||
common.MySQLUsername: "MYSQL_USR",
|
common.PostGreSQLUsername: "POSTGRESQL_USERNAME",
|
||||||
common.MySQLPassword: "MYSQL_PWD",
|
common.PostGreSQLPassword: "POSTGRESQL_PASSWORD",
|
||||||
common.MySQLDatabase: "MYSQL_DATABASE",
|
common.PostGreSQLDatabase: "POSTGRESQL_DATABASE",
|
||||||
common.SQLiteFile: "SQLITE_FILE",
|
common.PostGreSQLSSLMode: "POSTGRESQL_SSLMODE",
|
||||||
common.LDAPURL: "LDAP_URL",
|
common.LDAPURL: "LDAP_URL",
|
||||||
common.LDAPSearchDN: "LDAP_SEARCH_DN",
|
common.LDAPSearchDN: "LDAP_SEARCH_DN",
|
||||||
common.LDAPSearchPwd: "LDAP_SEARCH_PWD",
|
common.LDAPSearchPwd: "LDAP_SEARCH_PWD",
|
||||||
common.LDAPBaseDN: "LDAP_BASE_DN",
|
common.LDAPBaseDN: "LDAP_BASE_DN",
|
||||||
common.LDAPFilter: "LDAP_FILTER",
|
common.LDAPFilter: "LDAP_FILTER",
|
||||||
common.LDAPUID: "LDAP_UID",
|
common.LDAPUID: "LDAP_UID",
|
||||||
common.LDAPScope: &parser{
|
common.LDAPScope: &parser{
|
||||||
env: "LDAP_SCOPE",
|
env: "LDAP_SCOPE",
|
||||||
parse: parseStringToInt,
|
parse: parseStringToInt,
|
||||||
@ -141,7 +141,10 @@ var (
|
|||||||
common.ClairDB: "CLAIR_DB",
|
common.ClairDB: "CLAIR_DB",
|
||||||
common.ClairDBUsername: "CLAIR_DB_USERNAME",
|
common.ClairDBUsername: "CLAIR_DB_USERNAME",
|
||||||
common.ClairDBHost: "CLAIR_DB_HOST",
|
common.ClairDBHost: "CLAIR_DB_HOST",
|
||||||
common.ClairDBPort: "CLAIR_DB_PORT",
|
common.ClairDBPort: &parser{
|
||||||
|
env: "CLAIR_DB_PORT",
|
||||||
|
parse: parseStringToInt,
|
||||||
|
},
|
||||||
common.UAAEndpoint: "UAA_ENDPOINT",
|
common.UAAEndpoint: "UAA_ENDPOINT",
|
||||||
common.UAAClientID: "UAA_CLIENTID",
|
common.UAAClientID: "UAA_CLIENTID",
|
||||||
common.UAAClientSecret: "UAA_CLIENTSECRET",
|
common.UAAClientSecret: "UAA_CLIENTSECRET",
|
||||||
@ -164,15 +167,16 @@ var (
|
|||||||
// configurations need read from environment variables
|
// configurations need read from environment variables
|
||||||
// every time the system startup
|
// every time the system startup
|
||||||
repeatLoadEnvs = map[string]interface{}{
|
repeatLoadEnvs = map[string]interface{}{
|
||||||
common.ExtEndpoint: "EXT_ENDPOINT",
|
common.ExtEndpoint: "EXT_ENDPOINT",
|
||||||
common.MySQLPassword: "MYSQL_PWD",
|
common.PostGreSQLHOST: "POSTGRESQL_HOST",
|
||||||
common.MySQLHost: "MYSQL_HOST",
|
common.PostGreSQLPort: &parser{
|
||||||
common.MySQLUsername: "MYSQL_USR",
|
env: "POSTGRESQL_PORT",
|
||||||
common.MySQLDatabase: "MYSQL_DATABASE",
|
|
||||||
common.MySQLPort: &parser{
|
|
||||||
env: "MYSQL_PORT",
|
|
||||||
parse: parseStringToInt,
|
parse: parseStringToInt,
|
||||||
},
|
},
|
||||||
|
common.PostGreSQLUsername: "POSTGRESQL_USERNAME",
|
||||||
|
common.PostGreSQLPassword: "POSTGRESQL_PASSWORD",
|
||||||
|
common.PostGreSQLDatabase: "POSTGRESQL_DATABASE",
|
||||||
|
common.PostGreSQLSSLMode: "POSTGRESQL_SSLMODE",
|
||||||
common.MaxJobWorkers: &parser{
|
common.MaxJobWorkers: &parser{
|
||||||
env: "MAX_JOB_WORKERS",
|
env: "MAX_JOB_WORKERS",
|
||||||
parse: parseStringToInt,
|
parse: parseStringToInt,
|
||||||
@ -383,16 +387,13 @@ func LoadFromEnv(cfgs map[string]interface{}, all bool) error {
|
|||||||
func GetDatabaseFromCfg(cfg map[string]interface{}) *models.Database {
|
func GetDatabaseFromCfg(cfg map[string]interface{}) *models.Database {
|
||||||
database := &models.Database{}
|
database := &models.Database{}
|
||||||
database.Type = cfg[common.DatabaseType].(string)
|
database.Type = cfg[common.DatabaseType].(string)
|
||||||
mysql := &models.MySQL{}
|
postgresql := &models.PostGreSQL{}
|
||||||
mysql.Host = cfg[common.MySQLHost].(string)
|
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
|
||||||
mysql.Port = int(cfg[common.MySQLPort].(int))
|
postgresql.Port = int(cfg[common.PostGreSQLPort].(int))
|
||||||
mysql.Username = cfg[common.MySQLUsername].(string)
|
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
|
||||||
mysql.Password = cfg[common.MySQLPassword].(string)
|
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
|
||||||
mysql.Database = cfg[common.MySQLDatabase].(string)
|
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
|
||||||
database.MySQL = mysql
|
database.PostGreSQL = postgresql
|
||||||
sqlite := &models.SQLite{}
|
|
||||||
sqlite.File = cfg[common.SQLiteFile].(string)
|
|
||||||
database.SQLite = sqlite
|
|
||||||
return database
|
return database
|
||||||
}
|
}
|
||||||
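GetDatabaseFromCfg reads every field out of a generic map[string]interface{} with a type assertion, so the loader must have stored a string for the host and an int for the port; a mismatched type panics at startup instead of failing gracefully. A tiny illustration of that behaviour:

package main

import "fmt"

func main() {
	cfg := map[string]interface{}{
		"postgresql_host": "127.0.0.1",
		"postgresql_port": 5432,
	}
	host := cfg["postgresql_host"].(string) // panics if the value is not a string
	port := cfg["postgresql_port"].(int)    // likewise for a non-int port
	fmt.Println(host, port)
}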
|
|
||||||
|
@ -128,18 +128,17 @@ func TestLoadFromEnv(t *testing.T) {
|
|||||||
|
|
||||||
func TestGetDatabaseFromCfg(t *testing.T) {
|
func TestGetDatabaseFromCfg(t *testing.T) {
|
||||||
cfg := map[string]interface{}{
|
cfg := map[string]interface{}{
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLPassword: "1234",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.SQLiteFile: "/tmp/sqlite.db",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
database := GetDatabaseFromCfg(cfg)
|
database := GetDatabaseFromCfg(cfg)
|
||||||
|
|
||||||
assert.Equal(t, "mysql", database.Type)
|
assert.Equal(t, "postgresql", database.Type)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidLdapScope(t *testing.T) {
|
func TestValidLdapScope(t *testing.T) {
|
||||||
|
@ -41,12 +41,12 @@ const (
|
|||||||
ExtEndpoint = "ext_endpoint"
|
ExtEndpoint = "ext_endpoint"
|
||||||
AUTHMode = "auth_mode"
|
AUTHMode = "auth_mode"
|
||||||
DatabaseType = "database_type"
|
DatabaseType = "database_type"
|
||||||
MySQLHost = "mysql_host"
|
PostGreSQLHOST = "postgresql_host"
|
||||||
MySQLPort = "mysql_port"
|
PostGreSQLPort = "postgresql_port"
|
||||||
MySQLUsername = "mysql_username"
|
PostGreSQLUsername = "postgresql_username"
|
||||||
MySQLPassword = "mysql_password"
|
PostGreSQLPassword = "postgresql_password"
|
||||||
MySQLDatabase = "mysql_database"
|
PostGreSQLDatabase = "postgresql_database"
|
||||||
SQLiteFile = "sqlite_file"
|
PostGreSQLSSLMode = "postgresql_sslmode"
|
||||||
SelfRegistration = "self_registration"
|
SelfRegistration = "self_registration"
|
||||||
UIURL = "ui_url"
|
UIURL = "ui_url"
|
||||||
JobServiceURL = "jobservice_url"
|
JobServiceURL = "jobservice_url"
|
||||||
|
@ -47,7 +47,7 @@ func InitClairDB(clairDB *models.PostGreSQL) error {
|
|||||||
// Except for the password, other information is not configurable, so keep it hard-coded for 1.2.0.
|
// Except for the password, other information is not configurable, so keep it hard-coded for 1.2.0.
|
||||||
p := &pgsql{
|
p := &pgsql{
|
||||||
host: clairDB.Host,
|
host: clairDB.Host,
|
||||||
port: clairDB.Port,
|
port: strconv.Itoa(clairDB.Port),
|
||||||
usr: clairDB.Username,
|
usr: clairDB.Username,
|
||||||
pwd: clairDB.Password,
|
pwd: clairDB.Password,
|
||||||
database: clairDB.Database,
|
database: clairDB.Database,
|
||||||
@ -87,14 +87,13 @@ func InitDatabase(database *models.Database) error {
|
|||||||
|
|
||||||
func getDatabase(database *models.Database) (db Database, err error) {
|
func getDatabase(database *models.Database) (db Database, err error) {
|
||||||
switch database.Type {
|
switch database.Type {
|
||||||
case "", "mysql":
|
case "", "postgresql":
|
||||||
db = NewMySQL(database.MySQL.Host,
|
db = NewPQSQL(database.PostGreSQL.Host,
|
||||||
strconv.Itoa(database.MySQL.Port),
|
strconv.Itoa(database.PostGreSQL.Port),
|
||||||
database.MySQL.Username,
|
database.PostGreSQL.Username,
|
||||||
database.MySQL.Password,
|
database.PostGreSQL.Password,
|
||||||
database.MySQL.Database)
|
database.PostGreSQL.Database,
|
||||||
case "sqlite":
|
false)
|
||||||
db = NewSQLite(database.SQLite.File)
|
|
||||||
default:
|
default:
|
||||||
err = fmt.Errorf("invalid database: %s", database.Type)
|
err = fmt.Errorf("invalid database: %s", database.Type)
|
||||||
}
|
}
|
||||||
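With the SQLite branch removed, an empty or "postgresql" database type selects the pgsql implementation and anything else is rejected. A sketch of how a caller is expected to drive the factory, assuming the models.Database and models.PostGreSQL shapes used elsewhere in this change:

package main

import (
	"github.com/vmware/harbor/src/common/dao"
	"github.com/vmware/harbor/src/common/models"
	"github.com/vmware/harbor/src/common/utils/log"
)

func main() {
	database := &models.Database{
		Type: "postgresql",
		PostGreSQL: &models.PostGreSQL{
			Host:     "127.0.0.1",
			Port:     5432,
			Username: "postgres",
			Password: "root123",
			Database: "registry",
		},
	}
	if err := dao.InitDatabase(database); err != nil {
		log.Fatalf("failed to initialize database: %v", err)
	}
}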
|
@ -51,7 +51,7 @@ func cleanByUser(username string) {
|
|||||||
from project_member
|
from project_member
|
||||||
where entity_id = (
|
where entity_id = (
|
||||||
select user_id
|
select user_id
|
||||||
from user
|
from harbor_user
|
||||||
where username = ?
|
where username = ?
|
||||||
) `, username)
|
) `, username)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
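The user table becomes harbor_user throughout because user is a reserved word in PostgreSQL: it cannot be used unquoted as a table name (bare user evaluates to the current role), so keeping the old name would force every statement to double-quote the identifier. A short illustration:

package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	// Had the table kept its old name, every statement would need the
	// identifier quoted for PostgreSQL:
	fmt.Printf("select user_id from %s where username = $1\n", pq.QuoteIdentifier("user"))

	// With the rename, a plain identifier works everywhere:
	fmt.Println(`select user_id from harbor_user where username = $1`)
}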
@ -98,7 +98,7 @@ func cleanByUser(username string) {
|
|||||||
log.Error(err)
|
log.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = execUpdate(o, `delete from user where username = ?`, username)
|
err = execUpdate(o, `delete from harbor_user where username = ?`, username)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
o.Rollback()
|
o.Rollback()
|
||||||
log.Error(err)
|
log.Error(err)
|
||||||
@ -134,16 +134,13 @@ const publicityOn = 1
|
|||||||
const publicityOff = 0
|
const publicityOff = 0
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
databases := []string{"mysql", "sqlite"}
|
databases := []string{"postgresql"}
|
||||||
for _, database := range databases {
|
for _, database := range databases {
|
||||||
log.Infof("run test cases for database: %s", database)
|
log.Infof("run test cases for database: %s", database)
|
||||||
|
|
||||||
result := 1
|
result := 1
|
||||||
switch database {
|
switch database {
|
||||||
case "mysql":
|
case "postgresql":
|
||||||
PrepareTestForMySQL()
|
PrepareTestForPostgresSQL()
|
||||||
case "sqlite":
|
|
||||||
PrepareTestForSQLite()
|
|
||||||
default:
|
default:
|
||||||
log.Fatalf("invalid database: %s", database)
|
log.Fatalf("invalid database: %s", database)
|
||||||
}
|
}
|
||||||
@ -167,7 +164,7 @@ func clearAll() {
|
|||||||
tables := []string{"project_member",
|
tables := []string{"project_member",
|
||||||
"project_metadata", "access_log", "repository", "replication_policy",
|
"project_metadata", "access_log", "repository", "replication_policy",
|
||||||
"replication_target", "replication_job", "replication_immediate_trigger", "img_scan_job",
|
"replication_target", "replication_job", "replication_immediate_trigger", "img_scan_job",
|
||||||
"img_scan_overview", "clair_vuln_timestamp", "project", "user"}
|
"img_scan_overview", "clair_vuln_timestamp", "project", "harbor_user"}
|
||||||
for _, t := range tables {
|
for _, t := range tables {
|
||||||
if err := ClearTable(t); err != nil {
|
if err := ClearTable(t); err != nil {
|
||||||
log.Errorf("Failed to clear table: %s,error: %v", t, err)
|
log.Errorf("Failed to clear table: %s,error: %v", t, err)
|
||||||
@ -693,7 +690,7 @@ func TestGetRoleByID(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestToggleAdminRole(t *testing.T) {
|
func TestToggleAdminRole(t *testing.T) {
|
||||||
err := ToggleUserAdminRole(currentUser.UserID, 1)
|
err := ToggleUserAdminRole(currentUser.UserID, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Error in toggle ToggleUserAdmin role: %v, user: %+v", err, currentUser)
|
t.Errorf("Error in toggle ToggleUserAdmin role: %v, user: %+v", err, currentUser)
|
||||||
}
|
}
|
||||||
@ -704,7 +701,7 @@ func TestToggleAdminRole(t *testing.T) {
|
|||||||
if !isAdmin {
|
if !isAdmin {
|
||||||
t.Errorf("User is not admin after toggled, user id: %d", currentUser.UserID)
|
t.Errorf("User is not admin after toggled, user id: %d", currentUser.UserID)
|
||||||
}
|
}
|
||||||
err = ToggleUserAdminRole(currentUser.UserID, 0)
|
err = ToggleUserAdminRole(currentUser.UserID, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Error in toggle ToggleUserAdmin role: %v, user: %+v", err, currentUser)
|
t.Errorf("Error in toggle ToggleUserAdmin role: %v, user: %+v", err, currentUser)
|
||||||
}
|
}
|
||||||
@ -1195,7 +1192,7 @@ func TestDeleteRepPolicy(t *testing.T) {
|
|||||||
if err != nil && err != orm.ErrNoRows {
|
if err != nil && err != orm.ErrNoRows {
|
||||||
t.Errorf("Error occurred in GetRepPolicy:%v", err)
|
t.Errorf("Error occurred in GetRepPolicy:%v", err)
|
||||||
}
|
}
|
||||||
if p != nil && p.Deleted != 1 {
|
if p != nil && !p.Deleted {
|
||||||
t.Errorf("Able to find rep policy after deletion, id: %d", policyID)
|
t.Errorf("Able to find rep policy after deletion, id: %d", policyID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -15,6 +15,8 @@
|
|||||||
package group
|
package group
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/vmware/harbor/src/common/dao"
|
"github.com/vmware/harbor/src/common/dao"
|
||||||
"github.com/vmware/harbor/src/common/models"
|
"github.com/vmware/harbor/src/common/models"
|
||||||
"github.com/vmware/harbor/src/common/utils/log"
|
"github.com/vmware/harbor/src/common/utils/log"
|
||||||
@ -23,11 +25,17 @@ import (
|
|||||||
// AddUserGroup - Add User Group
|
// AddUserGroup - Add User Group
|
||||||
func AddUserGroup(userGroup models.UserGroup) (int, error) {
|
func AddUserGroup(userGroup models.UserGroup) (int, error) {
|
||||||
o := dao.GetOrmer()
|
o := dao.GetOrmer()
|
||||||
id, err := o.Insert(&userGroup)
|
|
||||||
|
sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?) RETURNING id"
|
||||||
|
var id int
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, userGroup.LdapGroupDN, now, now).QueryRow(&id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return int(id), err
|
|
||||||
|
return id, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// QueryUserGroup - Query User Group
|
// QueryUserGroup - Query User Group
|
||||||
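The move from o.Insert(&userGroup) to a raw INSERT ... RETURNING id is the pattern used for most inserts in this change: lib/pq does not implement Result.LastInsertId(), so the generated key has to be read back as a query row (beego's orm rewrites the ? placeholders for the postgres driver). A minimal database/sql sketch of the same idea, with an illustrative DSN:

package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	dsn := "host=127.0.0.1 port=5432 user=postgres password=root123 dbname=registry sslmode=disable"
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	now := time.Now()
	var id int
	// RETURNING turns the INSERT into a one-row query, so QueryRow+Scan
	// replaces the Exec+LastInsertId pattern that worked on MySQL.
	err = db.QueryRow(
		`insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time)
		 values ($1, $2, $3, $4, $5) RETURNING id`,
		"test_group_01", 1, "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com", now, now,
	).Scan(&id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new user_group id:", id)
}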
|
@ -30,33 +30,31 @@ var createdUserGroupID int
|
|||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
|
|
||||||
//databases := []string{"mysql", "sqlite"}
|
//databases := []string{"mysql", "sqlite"}
|
||||||
databases := []string{"mysql"}
|
databases := []string{"postgresql"}
|
||||||
for _, database := range databases {
|
for _, database := range databases {
|
||||||
log.Infof("run test cases for database: %s", database)
|
log.Infof("run test cases for database: %s", database)
|
||||||
|
|
||||||
result := 1
|
result := 1
|
||||||
switch database {
|
switch database {
|
||||||
case "mysql":
|
case "postgresql":
|
||||||
dao.PrepareTestForMySQL()
|
dao.PrepareTestForPostgresSQL()
|
||||||
case "sqlite":
|
|
||||||
dao.PrepareTestForSQLite()
|
|
||||||
default:
|
default:
|
||||||
log.Fatalf("invalid database: %s", database)
|
log.Fatalf("invalid database: %s", database)
|
||||||
}
|
}
|
||||||
|
|
||||||
//Extract to test utils
|
//Extract to test utils
|
||||||
initSqls := []string{
|
initSqls := []string{
|
||||||
"insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
||||||
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
||||||
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
|
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
|
||||||
"update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
|
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
|
||||||
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
|
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
|
||||||
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
|
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
|
||||||
}
|
}
|
||||||
|
|
||||||
clearSqls := []string{
|
clearSqls := []string{
|
||||||
"delete from project where name='member_test_01'",
|
"delete from project where name='member_test_01'",
|
||||||
"delete from user where username='member_test_01' or username='pm_sample'",
|
"delete from harbor_user where username='member_test_01' or username='pm_sample'",
|
||||||
"delete from user_group",
|
"delete from user_group",
|
||||||
"delete from project_member",
|
"delete from project_member",
|
||||||
}
|
}
|
||||||
|
@ -24,7 +24,7 @@ import (
|
|||||||
|
|
||||||
type pgsql struct {
|
type pgsql struct {
|
||||||
host string
|
host string
|
||||||
port int
|
port string
|
||||||
usr string
|
usr string
|
||||||
pwd string
|
pwd string
|
||||||
database string
|
database string
|
||||||
@ -47,13 +47,25 @@ func (p *pgsql) Name() string {
|
|||||||
|
|
||||||
// String ...
|
// String ...
|
||||||
func (p *pgsql) String() string {
|
func (p *pgsql) String() string {
|
||||||
return fmt.Sprintf("type-%s host-%s port-%d databse-%s sslmode-%q",
|
return fmt.Sprintf("type-%s host-%s port-%s databse-%s sslmode-%q",
|
||||||
p.Name(), p.host, p.port, p.database, pgsqlSSLMode(p.sslmode))
|
p.Name(), p.host, p.port, p.database, pgsqlSSLMode(p.sslmode))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewPQSQL returns a Database instance backed by PostgreSQL
|
||||||
|
func NewPQSQL(host string, port string, usr string, pwd string, database string, sslmode bool) Database {
|
||||||
|
return &pgsql{
|
||||||
|
host: host,
|
||||||
|
port: port,
|
||||||
|
usr: usr,
|
||||||
|
pwd: pwd,
|
||||||
|
database: database,
|
||||||
|
sslmode: sslmode,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//Register registers pgSQL to orm with the info wrapped by the instance.
|
//Register registers pgSQL to orm with the info wrapped by the instance.
|
||||||
func (p *pgsql) Register(alias ...string) error {
|
func (p *pgsql) Register(alias ...string) error {
|
||||||
if err := utils.TestTCPConn(fmt.Sprintf("%s:%d", p.host, p.port), 60, 2); err != nil {
|
if err := utils.TestTCPConn(fmt.Sprintf("%s:%s", p.host, p.port), 60, 2); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -65,7 +77,7 @@ func (p *pgsql) Register(alias ...string) error {
|
|||||||
if len(alias) != 0 {
|
if len(alias) != 0 {
|
||||||
an = alias[0]
|
an = alias[0]
|
||||||
}
|
}
|
||||||
info := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
|
info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
|
||||||
p.host, p.port, p.usr, p.pwd, p.database, pgsqlSSLMode(p.sslmode))
|
p.host, p.port, p.usr, p.pwd, p.database, pgsqlSSLMode(p.sslmode))
|
||||||
|
|
||||||
return orm.RegisterDataBase(an, "postgres", info)
|
return orm.RegisterDataBase(an, "postgres", info)
|
||||||
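Register hands orm.RegisterDataBase a plain libpq keyword/value connection string, which is why the port can stay a string end to end: the same value feeds both the DSN and the host:port reachability probe without conversion. The sslmode keyword takes the usual libpq values (disable, require, verify-ca, verify-full); the pgsqlSSLMode helper presumably maps the struct's boolean onto one of them. For illustration:

package main

import "fmt"

func main() {
	host, port, usr, pwd, database, sslmode := "127.0.0.1", "5432", "postgres", "root123", "registry", "disable"

	// Keyword/value DSN as accepted by lib/pq and registered with beego's orm.
	info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
		host, port, usr, pwd, database, sslmode)
	fmt.Println(info)

	// The same string port slots straight into the reachability check.
	fmt.Printf("%s:%s\n", host, port)
}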
|
@ -27,7 +27,7 @@ func AddProjectMetadata(meta *models.ProjectMetadata) error {
|
|||||||
now := time.Now()
|
now := time.Now()
|
||||||
sql := `insert into project_metadata
|
sql := `insert into project_metadata
|
||||||
(project_id, name, value, creation_time, update_time, deleted)
|
(project_id, name, value, creation_time, update_time, deleted)
|
||||||
values (?, ?, ?, ?, ?, 0)`
|
values (?, ?, ?, ?, ?, false)`
|
||||||
_, err := GetOrmer().Raw(sql, meta.ProjectID, meta.Name, meta.Value,
|
_, err := GetOrmer().Raw(sql, meta.ProjectID, meta.Name, meta.Value,
|
||||||
now, now).Exec()
|
now, now).Exec()
|
||||||
return err
|
return err
|
||||||
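The deleted flag, like the other former tinyint columns such as enabled and sysadmin_flag, is a real boolean in the PostgreSQL schema, so SQL literals move from 0/1 to false/true and the Go models can expose bool fields directly. A sketch of both styles of access, using hypothetical helpers in the dao package:

package dao

import (
	"github.com/vmware/harbor/src/common/models"
)

// listActiveMetadata is a hypothetical helper showing raw SQL against the
// boolean column, with true/false literals where the MySQL code used 0/1.
func listActiveMetadata(projectID int64) ([]*models.ProjectMetadata, error) {
	metadatas := []*models.ProjectMetadata{}
	sql := `select * from project_metadata where project_id = ? and deleted = false`
	_, err := GetOrmer().Raw(sql, projectID).QueryRows(&metadatas)
	return metadatas, err
}

// countActivePolicies does the same through the query builder, passing a Go
// bool to Filter instead of an integer flag.
func countActivePolicies() (int64, error) {
	return GetOrmer().QueryTable(&models.RepPolicy{}).Filter("deleted", false).Count()
}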
@ -39,7 +39,7 @@ func AddProjectMetadata(meta *models.ProjectMetadata) error {
|
|||||||
func DeleteProjectMetadata(projectID int64, name ...string) error {
|
func DeleteProjectMetadata(projectID int64, name ...string) error {
|
||||||
params := make([]interface{}, 1)
|
params := make([]interface{}, 1)
|
||||||
sql := `update project_metadata
|
sql := `update project_metadata
|
||||||
set deleted = 1
|
set deleted = true
|
||||||
where project_id = ?`
|
where project_id = ?`
|
||||||
params = append(params, projectID)
|
params = append(params, projectID)
|
||||||
|
|
||||||
@ -56,7 +56,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error {
|
|||||||
func UpdateProjectMetadata(meta *models.ProjectMetadata) error {
|
func UpdateProjectMetadata(meta *models.ProjectMetadata) error {
|
||||||
sql := `update project_metadata
|
sql := `update project_metadata
|
||||||
set value = ?, update_time = ?
|
set value = ?, update_time = ?
|
||||||
where project_id = ? and name = ? and deleted = 0`
|
where project_id = ? and name = ? and deleted = false`
|
||||||
_, err := GetOrmer().Raw(sql, meta.Value, time.Now(), meta.ProjectID,
|
_, err := GetOrmer().Raw(sql, meta.Value, time.Now(), meta.ProjectID,
|
||||||
meta.Name).Exec()
|
meta.Name).Exec()
|
||||||
return err
|
return err
|
||||||
@ -70,7 +70,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
|
|||||||
params := make([]interface{}, 1)
|
params := make([]interface{}, 1)
|
||||||
|
|
||||||
sql := `select * from project_metadata
|
sql := `select * from project_metadata
|
||||||
where project_id = ? and deleted = 0`
|
where project_id = ? and deleted = false`
|
||||||
params = append(params, projectID)
|
params = append(params, projectID)
|
||||||
|
|
||||||
if len(name) > 0 {
|
if len(name) > 0 {
|
||||||
@ -93,7 +93,7 @@ func paramPlaceholder(n int) string {
|
|||||||
// ListProjectMetadata ...
|
// ListProjectMetadata ...
|
||||||
func ListProjectMetadata(name, value string) ([]*models.ProjectMetadata, error) {
|
func ListProjectMetadata(name, value string) ([]*models.ProjectMetadata, error) {
|
||||||
sql := `select * from project_metadata
|
sql := `select * from project_metadata
|
||||||
where name = ? and value = ? and deleted = 0`
|
where name = ? and value = ? and deleted = false`
|
||||||
metadatas := []*models.ProjectMetadata{}
|
metadatas := []*models.ProjectMetadata{}
|
||||||
_, err := GetOrmer().Raw(sql, name, value).QueryRows(&metadatas)
|
_, err := GetOrmer().Raw(sql, name, value).QueryRows(&metadatas)
|
||||||
return metadatas, err
|
return metadatas, err
|
||||||
|
@ -25,20 +25,13 @@ import (
|
|||||||
|
|
||||||
// AddProject adds a project to the database along with project roles information and access log records.
|
// AddProject adds a project to the database along with project roles information and access log records.
|
||||||
func AddProject(project models.Project) (int64, error) {
|
func AddProject(project models.Project) (int64, error) {
|
||||||
|
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
p, err := o.Raw("insert into project (owner_id, name, creation_time, update_time, deleted) values (?, ?, ?, ?, ?)").Prepare()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
|
sql := "insert into project (owner_id, name, creation_time, update_time, deleted) values (?, ?, ?, ?, ?) RETURNING project_id"
|
||||||
|
var projectID int64
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
r, err := p.Exec(project.OwnerID, project.Name, now, now, project.Deleted)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
projectID, err := r.LastInsertId()
|
err := o.Raw(sql, project.OwnerID, project.Name, now, now, project.Deleted).QueryRow(&projectID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@ -53,9 +46,9 @@ func AddProject(project models.Project) (int64, error) {
|
|||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
if pmID == 0 {
|
if pmID == 0 {
|
||||||
return projectID, fmt.Errorf("Failed to add project member, pmid=0")
|
return projectID, fmt.Errorf("failed to add project member, pmID=0")
|
||||||
}
|
}
|
||||||
return projectID, err
|
return projectID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func addProjectMember(member models.Member) (int, error) {
|
func addProjectMember(member models.Member) (int, error) {
|
||||||
@ -72,16 +65,13 @@ func addProjectMember(member models.Member) (int, error) {
|
|||||||
return 0, fmt.Errorf("Invalid project_id, member: %+v", member)
|
return 0, fmt.Errorf("Invalid project_id, member: %+v", member)
|
||||||
}
|
}
|
||||||
|
|
||||||
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?)"
|
var pmID int
|
||||||
r, err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).Exec()
|
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?) RETURNING id"
|
||||||
|
err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).QueryRow(&pmID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
pmid, err := r.LastInsertId()
|
return pmID, err
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return int(pmid), err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetProjectByID ...
|
// GetProjectByID ...
|
||||||
@ -89,7 +79,7 @@ func GetProjectByID(id int64) (*models.Project, error) {
|
|||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
|
|
||||||
sql := `select p.project_id, p.name, u.username as owner_name, p.owner_id, p.creation_time, p.update_time
|
sql := `select p.project_id, p.name, u.username as owner_name, p.owner_id, p.creation_time, p.update_time
|
||||||
from project p left join user u on p.owner_id = u.user_id where p.deleted = 0 and p.project_id = ?`
|
from project p left join harbor_user u on p.owner_id = u.user_id where p.deleted = false and p.project_id = ?`
|
||||||
queryParam := make([]interface{}, 1)
|
queryParam := make([]interface{}, 1)
|
||||||
queryParam = append(queryParam, id)
|
queryParam = append(queryParam, id)
|
||||||
|
|
||||||
@ -111,7 +101,7 @@ func GetProjectByID(id int64) (*models.Project, error) {
|
|||||||
func GetProjectByName(name string) (*models.Project, error) {
|
func GetProjectByName(name string) (*models.Project, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
var p []models.Project
|
var p []models.Project
|
||||||
n, err := o.Raw(`select * from project where name = ? and deleted = 0`, name).QueryRows(&p)
|
n, err := o.Raw(`select * from project where name = ? and deleted = false`, name).QueryRows(&p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -146,10 +136,12 @@ func GetTotalOfProjects(query *models.ProjectQueryParam) (int64, error) {
|
|||||||
// GetProjects returns a project list according to the query conditions
|
// GetProjects returns a project list according to the query conditions
|
||||||
func GetProjects(query *models.ProjectQueryParam) ([]*models.Project, error) {
|
func GetProjects(query *models.ProjectQueryParam) ([]*models.Project, error) {
|
||||||
sql, params := projectQueryConditions(query)
|
sql, params := projectQueryConditions(query)
|
||||||
|
if query == nil {
|
||||||
|
sql += ` order by p.name`
|
||||||
|
}
|
||||||
|
|
||||||
sql = `select distinct p.project_id, p.name, p.owner_id,
|
sql = `select distinct p.project_id, p.name, p.owner_id,
|
||||||
p.creation_time, p.update_time ` + sql
|
p.creation_time, p.update_time ` + sql
|
||||||
|
|
||||||
var projects []*models.Project
|
var projects []*models.Project
|
||||||
_, err := GetOrmer().Raw(sql, params).QueryRows(&projects)
|
_, err := GetOrmer().Raw(sql, params).QueryRows(&projects)
|
||||||
return projects, err
|
return projects, err
|
||||||
@ -161,7 +153,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
|
|||||||
sql := ` from project as p`
|
sql := ` from project as p`
|
||||||
|
|
||||||
if query == nil {
|
if query == nil {
|
||||||
sql += ` where p.deleted=0 order by p.name`
|
sql += ` where p.deleted=false`
|
||||||
return sql, params
|
return sql, params
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -172,17 +164,17 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(query.Owner) != 0 {
|
if len(query.Owner) != 0 {
|
||||||
sql += ` join user u1
|
sql += ` join harbor_user u1
|
||||||
on p.owner_id = u1.user_id`
|
on p.owner_id = u1.user_id`
|
||||||
}
|
}
|
||||||
|
|
||||||
if query.Member != nil && len(query.Member.Name) != 0 {
|
if query.Member != nil && len(query.Member.Name) != 0 {
|
||||||
sql += ` join project_member pm
|
sql += ` join project_member pm
|
||||||
on p.project_id = pm.project_id
|
on p.project_id = pm.project_id
|
||||||
join user u2
|
join harbor_user u2
|
||||||
on pm.entity_id=u2.user_id`
|
on pm.entity_id=u2.user_id`
|
||||||
}
|
}
|
||||||
sql += ` where p.deleted=0`
|
sql += ` where p.deleted=false`
|
||||||
|
|
||||||
if len(query.Owner) != 0 {
|
if len(query.Owner) != 0 {
|
||||||
sql += ` and u1.username=?`
|
sql += ` and u1.username=?`
|
||||||
@ -220,10 +212,8 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
|
|||||||
params = append(params, query.ProjectIDs)
|
params = append(params, query.ProjectIDs)
|
||||||
}
|
}
|
||||||
|
|
||||||
sql += ` order by p.name`
|
|
||||||
|
|
||||||
if query.Pagination != nil && query.Pagination.Size > 0 {
|
if query.Pagination != nil && query.Pagination.Size > 0 {
|
||||||
sql += ` limit ?`
|
sql += ` order by p.name limit ?`
|
||||||
params = append(params, query.Pagination.Size)
|
params = append(params, query.Pagination.Size)
|
||||||
|
|
||||||
if query.Pagination.Page > 0 {
|
if query.Pagination.Page > 0 {
|
||||||
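Emitting order by p.name together with the limit/offset branch keeps the paging deterministic: without an ORDER BY, rows come back in an unspecified order, so two pages could overlap or skip projects. A simplified sketch of how the tail of the statement is assembled:

package main

import "fmt"

func main() {
	sql := `select distinct p.project_id, p.name from project as p where p.deleted=false`
	params := []interface{}{}

	page, size := int64(2), int64(20)
	if size > 0 {
		sql += ` order by p.name limit ?`
		params = append(params, size)
		if page > 0 {
			sql += ` offset ?`
			params = append(params, (page-1)*size)
		}
	}
	fmt.Println(sql, params) // ... order by p.name limit ? offset ? [20 20]
}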
@ -245,7 +235,7 @@ func DeleteProject(id int64) error {
|
|||||||
name := fmt.Sprintf("%s#%d", project.Name, project.ProjectID)
|
name := fmt.Sprintf("%s#%d", project.Name, project.ProjectID)
|
||||||
|
|
||||||
sql := `update project
|
sql := `update project
|
||||||
set deleted = 1, name = ?
|
set deleted = true, name = ?
|
||||||
where project_id = ?`
|
where project_id = ?`
|
||||||
_, err = GetOrmer().Raw(sql, name, id).Exec()
|
_, err = GetOrmer().Raw(sql, name, id).Exec()
|
||||||
return err
|
return err
|
||||||
|
@ -35,9 +35,9 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) {
|
|||||||
on pm.project_id = ? and ug.id = pm.entity_id join role r on pm.role = r.role_id where pm.entity_type = 'g')
|
on pm.project_id = ? and ug.id = pm.entity_id join role r on pm.role = r.role_id where pm.entity_type = 'g')
|
||||||
union
|
union
|
||||||
(select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename,
|
(select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename,
|
||||||
r.role_id as role, pm.entity_type as entity_type from user u join project_member pm
|
r.role_id as role, pm.entity_type as entity_type from harbor_user u join project_member pm
|
||||||
on pm.project_id = ? and u.user_id = pm.entity_id
|
on pm.project_id = ? and u.user_id = pm.entity_id
|
||||||
join role r on pm.role = r.role_id where u.deleted = 0 and pm.entity_type = 'u')) as a where a.project_id = ? `
|
join role r on pm.role = r.role_id where u.deleted = false and pm.entity_type = 'u')) as a where a.project_id = ? `
|
||||||
|
|
||||||
queryParam := make([]interface{}, 1)
|
queryParam := make([]interface{}, 1)
|
||||||
// used ProjectID already
|
// used ProjectID already
|
||||||
@ -89,16 +89,14 @@ func AddProjectMember(member models.Member) (int, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?)"
|
|
||||||
r, err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).Exec()
|
var pmid int
|
||||||
|
sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?) RETURNING id"
|
||||||
|
err = o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).QueryRow(&pmid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
pmid, err := r.LastInsertId()
|
return pmid, err
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return int(pmid), err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateProjectMemberRole updates the record in table project_member, only role can be changed
|
// UpdateProjectMemberRole updates the record in table project_member, only role can be changed
|
||||||
@ -127,9 +125,9 @@ func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, e
|
|||||||
r.name as rolename,
|
r.name as rolename,
|
||||||
pm.role, pm.entity_id, pm.entity_type
|
pm.role, pm.entity_id, pm.entity_type
|
||||||
from project_member pm
|
from project_member pm
|
||||||
left join user u on pm.entity_id = u.user_id and pm.entity_type = 'u'
|
left join harbor_user u on pm.entity_id = u.user_id and pm.entity_type = 'u'
|
||||||
left join role r on pm.role = r.role_id
|
left join role r on pm.role = r.role_id
|
||||||
where u.deleted = 0 and pm.project_id = ? and u.username like ? order by entity_name )
|
where u.deleted = false and pm.project_id = ? and u.username like ? order by entity_name )
|
||||||
union
|
union
|
||||||
(select pm.id, pm.project_id,
|
(select pm.id, pm.project_id,
|
||||||
ug.group_name as entity_name,
|
ug.group_name as entity_name,
|
||||||
|
@ -31,33 +31,31 @@ import (
|
|||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
|
|
||||||
//databases := []string{"mysql", "sqlite"}
|
//databases := []string{"mysql", "sqlite"}
|
||||||
databases := []string{"mysql"}
|
databases := []string{"postgresql"}
|
||||||
for _, database := range databases {
|
for _, database := range databases {
|
||||||
log.Infof("run test cases for database: %s", database)
|
log.Infof("run test cases for database: %s", database)
|
||||||
|
|
||||||
result := 1
|
result := 1
|
||||||
switch database {
|
switch database {
|
||||||
case "mysql":
|
case "postgresql":
|
||||||
dao.PrepareTestForMySQL()
|
dao.PrepareTestForPostgresSQL()
|
||||||
case "sqlite":
|
|
||||||
dao.PrepareTestForSQLite()
|
|
||||||
default:
|
default:
|
||||||
log.Fatalf("invalid database: %s", database)
|
log.Fatalf("invalid database: %s", database)
|
||||||
}
|
}
|
||||||
|
|
||||||
//Extract to test utils
|
//Extract to test utils
|
||||||
initSqls := []string{
|
initSqls := []string{
|
||||||
"insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
||||||
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
||||||
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
|
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
|
||||||
"update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
|
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
|
||||||
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
|
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
|
||||||
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
|
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
|
||||||
}
|
}
|
||||||
|
|
||||||
clearSqls := []string{
|
clearSqls := []string{
|
||||||
"delete from project where name='member_test_01'",
|
"delete from project where name='member_test_01'",
|
||||||
"delete from user where username='member_test_01' or username='pm_sample'",
|
"delete from harbor_user where username='member_test_01' or username='pm_sample'",
|
||||||
"delete from user_group",
|
"delete from user_group",
|
||||||
"delete from project_member",
|
"delete from project_member",
|
||||||
}
|
}
|
||||||
|
@ -49,8 +49,8 @@ func TestDeleteProject(t *testing.T) {
|
|||||||
t.Fatalf("failed to get project: %v", err)
|
t.Fatalf("failed to get project: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.Deleted != 1 {
|
if !p.Deleted {
|
||||||
t.Errorf("unexpeced deleted column: %d != %d", p.Deleted, 1)
|
t.Errorf("unexpeced deleted column: %t != %t", p.Deleted, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
deletedName := fmt.Sprintf("%s#%d", name, id)
|
deletedName := fmt.Sprintf("%s#%d", name, id)
|
||||||
@ -96,16 +96,16 @@ func Test_projectQueryConditions(t *testing.T) {
|
|||||||
[]interface{}{}},
|
[]interface{}{}},
|
||||||
{"Query with valid projectID",
|
{"Query with valid projectID",
|
||||||
args{query: &models.ProjectQueryParam{ProjectIDs: []int64{2, 3}, Owner: "admin"}},
|
args{query: &models.ProjectQueryParam{ProjectIDs: []int64{2, 3}, Owner: "admin"}},
|
||||||
` from project as p join user u1
|
` from project as p join harbor_user u1
|
||||||
on p.owner_id = u1.user_id where p.deleted=0 and u1.username=? and p.project_id in ( ?,? ) order by p.name`,
|
on p.owner_id = u1.user_id where p.deleted=false and u1.username=? and p.project_id in ( ?,? )`,
|
||||||
[]interface{}{2, 3}},
|
[]interface{}{2, 3}},
|
||||||
{"Query with valid page and member",
|
{"Query with valid page and member",
|
||||||
args{query: &models.ProjectQueryParam{ProjectIDs: []int64{2, 3}, Owner: "admin", Name: "sample", Member: &models.MemberQuery{Name: "name", Role: 1}, Pagination: &models.Pagination{Page: 1, Size: 20}}},
|
args{query: &models.ProjectQueryParam{ProjectIDs: []int64{2, 3}, Owner: "admin", Name: "sample", Member: &models.MemberQuery{Name: "name", Role: 1}, Pagination: &models.Pagination{Page: 1, Size: 20}}},
|
||||||
` from project as p join user u1
|
` from project as p join harbor_user u1
|
||||||
on p.owner_id = u1.user_id join project_member pm
|
on p.owner_id = u1.user_id join project_member pm
|
||||||
on p.project_id = pm.project_id
|
on p.project_id = pm.project_id
|
||||||
join user u2
|
join harbor_user u2
|
||||||
on pm.entity_id=u2.user_id where p.deleted=0 and u1.username=? and p.name like ? and u2.username=? and pm.role = ? and p.project_id in ( ?,? ) order by p.name limit ? offset ?`,
|
on pm.entity_id=u2.user_id where p.deleted=false and u1.username=? and p.name like ? and u2.username=? and pm.role = ? and p.project_id in ( ?,? ) order by p.name limit ? offset ?`,
|
||||||
[]interface{}{1, []int64{2, 3}, 20, 0}},
|
[]interface{}{1, []int64{2, 3}, 20, 0}},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
@ -24,27 +24,21 @@ import (
|
|||||||
|
|
||||||
// Register is used for user registration; the password is encrypted before the record is inserted into the database.
|
// Register is used for user registration; the password is encrypted before the record is inserted into the database.
|
||||||
func Register(user models.User) (int64, error) {
|
func Register(user models.User) (int64, error) {
|
||||||
|
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
p, err := o.Raw("insert into user (username, password, realname, email, comment, salt, sysadmin_flag, creation_time, update_time) values (?, ?, ?, ?, ?, ?, ?, ?, ?)").Prepare()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer p.Close()
|
|
||||||
|
|
||||||
salt := utils.GenerateRandomString()
|
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
r, err := p.Exec(user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email, user.Comment, salt, user.HasAdminRole, now, now)
|
salt := utils.GenerateRandomString()
|
||||||
|
sql := `insert into harbor_user
|
||||||
|
(username, password, realname, email, comment, salt, sysadmin_flag, creation_time, update_time)
|
||||||
|
values (?, ?, ?, ?, ?, ?, ?, ?, ?) RETURNING user_id`
|
||||||
|
var userID int64
|
||||||
|
err := o.Raw(sql, user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email,
|
||||||
|
user.Comment, salt, user.HasAdminRole, now, now).QueryRow(&userID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
userID, err := r.LastInsertId()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return userID, nil
|
return userID, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// UserExists returns whether a user exists according username or Email.
|
// UserExists returns whether a user exists according username or Email.
|
||||||
@ -56,7 +50,7 @@ func UserExists(user models.User, target string) (bool, error) {
|
|||||||
|
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
|
|
||||||
sql := `select user_id from user where 1=1 `
|
sql := `select user_id from harbor_user where 1=1 `
|
||||||
queryParam := make([]interface{}, 1)
|
queryParam := make([]interface{}, 1)
|
||||||
|
|
||||||
switch target {
|
switch target {
|
||||||
|
@ -27,7 +27,15 @@ import (
|
|||||||
// AddRepTarget ...
|
// AddRepTarget ...
|
||||||
func AddRepTarget(target models.RepTarget) (int64, error) {
|
func AddRepTarget(target models.RepTarget) (int64, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
return o.Insert(&target)
|
|
||||||
|
sql := "insert into replication_target (name, url, username, password, insecure, target_type) values (?, ?, ?, ?, ?, ?) RETURNING id"
|
||||||
|
|
||||||
|
var targetID int64
|
||||||
|
err := o.Raw(sql, target.Name, target.URL, target.Username, target.Password, target.Insecure, target.Type).QueryRow(&targetID)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return targetID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRepTarget ...
|
// GetRepTarget ...
|
||||||
@ -75,8 +83,13 @@ func DeleteRepTarget(id int64) error {
|
|||||||
// UpdateRepTarget ...
|
// UpdateRepTarget ...
|
||||||
func UpdateRepTarget(target models.RepTarget) error {
|
func UpdateRepTarget(target models.RepTarget) error {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
target.UpdateTime = time.Now()
|
|
||||||
_, err := o.Update(&target, "URL", "Name", "Username", "Password", "Insecure", "UpdateTime")
|
sql := `update replication_target
|
||||||
|
set url = ?, name = ?, username = ?, password = ?, insecure = ?, update_time = ?
|
||||||
|
where id = ?`
|
||||||
|
|
||||||
|
_, err := o.Raw(sql, target.URL, target.Name, target.Username, target.Password, target.Insecure, time.Now(), target.ID).Exec()
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -89,7 +102,7 @@ func FilterRepTargets(name string) ([]*models.RepTarget, error) {
|
|||||||
sql := `select * from replication_target `
|
sql := `select * from replication_target `
|
||||||
if len(name) != 0 {
|
if len(name) != 0 {
|
||||||
sql += `where name like ? `
|
sql += `where name like ? `
|
||||||
args = append(args, `%`+Escape(name)+`%`)
|
args = append(args, "%"+Escape(name)+"%")
|
||||||
}
|
}
|
||||||
sql += `order by creation_time`
|
sql += `order by creation_time`
|
||||||
|
|
||||||
@ -106,25 +119,27 @@ func FilterRepTargets(name string) ([]*models.RepTarget, error) {
|
|||||||
func AddRepPolicy(policy models.RepPolicy) (int64, error) {
|
func AddRepPolicy(policy models.RepPolicy) (int64, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
sql := `insert into replication_policy (name, project_id, target_id, enabled, description, cron_str, creation_time, update_time, filters, replicate_deletion)
|
sql := `insert into replication_policy (name, project_id, target_id, enabled, description, cron_str, creation_time, update_time, filters, replicate_deletion)
|
||||||
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
|
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) RETURNING id`
|
||||||
params := []interface{}{}
|
params := []interface{}{}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
params = append(params, policy.Name, policy.ProjectID, policy.TargetID, 1,
|
|
||||||
|
params = append(params, policy.Name, policy.ProjectID, policy.TargetID, true,
|
||||||
policy.Description, policy.Trigger, now, now, policy.Filters,
|
policy.Description, policy.Trigger, now, now, policy.Filters,
|
||||||
policy.ReplicateDeletion)
|
policy.ReplicateDeletion)
|
||||||
|
|
||||||
result, err := o.Raw(sql, params...).Exec()
|
var policyID int64
|
||||||
|
err := o.Raw(sql, params...).QueryRow(&policyID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return result.LastInsertId()
|
return policyID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRepPolicy ...
|
// GetRepPolicy ...
|
||||||
func GetRepPolicy(id int64) (*models.RepPolicy, error) {
|
func GetRepPolicy(id int64) (*models.RepPolicy, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
sql := `select * from replication_policy where id = ? and deleted = 0`
|
sql := `select * from replication_policy where id = ? and deleted = false`
|
||||||
|
|
||||||
var policy models.RepPolicy
|
var policy models.RepPolicy
|
||||||
|
|
||||||
@ -140,7 +155,7 @@ func GetRepPolicy(id int64) (*models.RepPolicy, error) {
|
|||||||
|
|
||||||
// GetTotalOfRepPolicies returns the total count of replication policies
|
// GetTotalOfRepPolicies returns the total count of replication policies
|
||||||
func GetTotalOfRepPolicies(name string, projectID int64) (int64, error) {
|
func GetTotalOfRepPolicies(name string, projectID int64) (int64, error) {
|
||||||
qs := GetOrmer().QueryTable(&models.RepPolicy{}).Filter("deleted", 0)
|
qs := GetOrmer().QueryTable(&models.RepPolicy{}).Filter("deleted", false)
|
||||||
|
|
||||||
if len(name) != 0 {
|
if len(name) != 0 {
|
||||||
qs = qs.Filter("name__icontains", name)
|
qs = qs.Filter("name__icontains", name)
|
||||||
@ -166,23 +181,23 @@ func FilterRepPolicies(name string, projectID, page, pageSize int64) ([]*models.
|
|||||||
count(rj.status) as error_job_count
|
count(rj.status) as error_job_count
|
||||||
from replication_policy rp
|
from replication_policy rp
|
||||||
left join replication_target rt on rp.target_id=rt.id
|
left join replication_target rt on rp.target_id=rt.id
|
||||||
left join replication_job rj on rp.id=rj.policy_id and (rj.status="error"
|
left join replication_job rj on rp.id=rj.policy_id and (rj.status='error'
|
||||||
or rj.status="retrying")
|
or rj.status='retrying')
|
||||||
where rp.deleted = 0 `
|
where rp.deleted = false `
|
||||||
|
|
||||||
if len(name) != 0 && projectID != 0 {
|
if len(name) != 0 && projectID != 0 {
|
||||||
sql += `and rp.name like ? and rp.project_id = ? `
|
sql += `and rp.name like ? and rp.project_id = ? `
|
||||||
args = append(args, `%`+Escape(name)+`%`)
|
args = append(args, "%"+Escape(name)+"%")
|
||||||
args = append(args, projectID)
|
args = append(args, projectID)
|
||||||
} else if len(name) != 0 {
|
} else if len(name) != 0 {
|
||||||
sql += `and rp.name like ? `
|
sql += `and rp.name like ? `
|
||||||
args = append(args, `%`+Escape(name)+`%`)
|
args = append(args, "%"+Escape(name)+"%")
|
||||||
} else if projectID != 0 {
|
} else if projectID != 0 {
|
||||||
sql += `and rp.project_id = ? `
|
sql += `and rp.project_id = ? `
|
||||||
args = append(args, projectID)
|
args = append(args, projectID)
|
||||||
}
|
}
|
||||||
|
|
||||||
sql += `group by rp.id order by rp.creation_time`
|
sql += `group by rt.name, rp.id order by rp.creation_time`
|
||||||
|
|
||||||
if page > 0 && pageSize > 0 {
|
if page > 0 && pageSize > 0 {
|
||||||
sql += ` limit ? offset ?`
|
sql += ` limit ? offset ?`
|
||||||
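Two PostgreSQL-specific fixes meet in this query: string literals must use single quotes, since double quotes denote identifiers and rj.status="error" would be parsed as a column reference, and every selected column that is not aggregated (rt.name here) has to appear in GROUP BY, something older MySQL defaults tolerated. A compact illustration of the corrected statement shape:

package main

import "fmt"

func main() {
	// Single-quoted literals and a GROUP BY that lists every non-aggregated
	// selected column (rt.name, rp.id) are both required by PostgreSQL.
	sql := `select rp.id, rp.name, rt.name as target_name, count(rj.status) as error_job_count
	from replication_policy rp
	left join replication_target rt on rp.target_id = rt.id
	left join replication_job rj on rp.id = rj.policy_id and (rj.status = 'error' or rj.status = 'retrying')
	where rp.deleted = false
	group by rt.name, rp.id
	order by rp.creation_time`
	fmt.Println(sql)
}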
@ -200,7 +215,7 @@ func FilterRepPolicies(name string, projectID, page, pageSize int64) ([]*models.
|
|||||||
// GetRepPolicyByName ...
|
// GetRepPolicyByName ...
|
||||||
func GetRepPolicyByName(name string) (*models.RepPolicy, error) {
|
func GetRepPolicyByName(name string) (*models.RepPolicy, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
sql := `select * from replication_policy where deleted = 0 and name = ?`
|
sql := `select * from replication_policy where deleted = false and name = ?`
|
||||||
|
|
||||||
var policy models.RepPolicy
|
var policy models.RepPolicy
|
||||||
|
|
||||||
@ -217,7 +232,7 @@ func GetRepPolicyByName(name string) (*models.RepPolicy, error) {
|
|||||||
// GetRepPolicyByProject ...
|
// GetRepPolicyByProject ...
|
||||||
func GetRepPolicyByProject(projectID int64) ([]*models.RepPolicy, error) {
|
func GetRepPolicyByProject(projectID int64) ([]*models.RepPolicy, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
sql := `select * from replication_policy where deleted = 0 and project_id = ?`
|
sql := `select * from replication_policy where deleted = false and project_id = ?`
|
||||||
|
|
||||||
var policies []*models.RepPolicy
|
var policies []*models.RepPolicy
|
||||||
|
|
||||||
@ -231,7 +246,7 @@ func GetRepPolicyByProject(projectID int64) ([]*models.RepPolicy, error) {
|
|||||||
// GetRepPolicyByTarget ...
|
// GetRepPolicyByTarget ...
|
||||||
func GetRepPolicyByTarget(targetID int64) ([]*models.RepPolicy, error) {
|
func GetRepPolicyByTarget(targetID int64) ([]*models.RepPolicy, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
sql := `select * from replication_policy where deleted = 0 and target_id = ?`
|
sql := `select * from replication_policy where deleted = false and target_id = ?`
|
||||||
|
|
||||||
var policies []*models.RepPolicy
|
var policies []*models.RepPolicy
|
||||||
|
|
||||||
@ -245,7 +260,7 @@ func GetRepPolicyByTarget(targetID int64) ([]*models.RepPolicy, error) {
|
|||||||
// GetRepPolicyByProjectAndTarget ...
|
// GetRepPolicyByProjectAndTarget ...
|
||||||
func GetRepPolicyByProjectAndTarget(projectID, targetID int64) ([]*models.RepPolicy, error) {
|
func GetRepPolicyByProjectAndTarget(projectID, targetID int64) ([]*models.RepPolicy, error) {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
sql := `select * from replication_policy where deleted = 0 and project_id = ? and target_id = ?`
|
sql := `select * from replication_policy where deleted = false and project_id = ? and target_id = ?`
|
||||||
|
|
||||||
var policies []*models.RepPolicy
|
var policies []*models.RepPolicy
|
||||||
|
|
||||||
@ -259,9 +274,13 @@ func GetRepPolicyByProjectAndTarget(projectID, targetID int64) ([]*models.RepPol
|
|||||||
// UpdateRepPolicy ...
|
// UpdateRepPolicy ...
|
||||||
func UpdateRepPolicy(policy *models.RepPolicy) error {
|
func UpdateRepPolicy(policy *models.RepPolicy) error {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
policy.UpdateTime = time.Now()
|
|
||||||
_, err := o.Update(policy, "ProjectID", "TargetID", "Name", "Description",
|
sql := `update replication_policy
|
||||||
"Trigger", "Filters", "ReplicateDeletion", "UpdateTime")
|
set project_id = ?, target_id = ?, name = ?, description = ?, cron_str = ?, filters = ?, replicate_deletion = ?, update_time = ?
|
||||||
|
where id = ?`
|
||||||
|
|
||||||
|
_, err := o.Raw(sql, policy.ProjectID, policy.TargetID, policy.Name, policy.Description, policy.Trigger, policy.Filters, policy.ReplicateDeletion, time.Now(), policy.ID).Exec()
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -270,7 +289,7 @@ func DeleteRepPolicy(id int64) error {
|
|||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
policy := &models.RepPolicy{
|
policy := &models.RepPolicy{
|
||||||
ID: id,
|
ID: id,
|
||||||
Deleted: 1,
|
Deleted: true,
|
||||||
UpdateTime: time.Now(),
|
UpdateTime: time.Now(),
|
||||||
}
|
}
|
||||||
_, err := o.Update(policy, "Deleted")
|
_, err := o.Update(policy, "Deleted")
|
||||||
|
@ -70,7 +70,7 @@ func IsAdminRole(userIDOrUsername interface{}) (bool, error) {
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return user.HasAdminRole == 1, nil
|
return user.HasAdminRole, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRoleByID ...
|
// GetRoleByID ...
|
||||||
|
@ -27,32 +27,40 @@ var defaultRegistered = false
|
|||||||
|
|
||||||
// PrepareTestForMySQL is for test only.
|
// PrepareTestForMySQL is for test only.
|
||||||
func PrepareTestForMySQL() {
|
func PrepareTestForMySQL() {
|
||||||
dbHost := os.Getenv("MYSQL_HOST")
|
}
|
||||||
|
|
||||||
|
// PrepareTestForSQLite is for test only.
|
||||||
|
func PrepareTestForSQLite() {
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrepareTestForPostgresSQL is for test only.
|
||||||
|
func PrepareTestForPostgresSQL() {
|
||||||
|
dbHost := os.Getenv("POSTGRESQL_HOST")
|
||||||
if len(dbHost) == 0 {
|
if len(dbHost) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_HOST is not set")
|
log.Fatalf("environment variable POSTGRESQL_HOST is not set")
|
||||||
}
|
}
|
||||||
dbUser := os.Getenv("MYSQL_USR")
|
dbUser := os.Getenv("POSTGRESQL_USR")
|
||||||
if len(dbUser) == 0 {
|
if len(dbUser) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_USR is not set")
|
log.Fatalf("environment variable POSTGRESQL_USR is not set")
|
||||||
}
|
}
|
||||||
dbPortStr := os.Getenv("MYSQL_PORT")
|
dbPortStr := os.Getenv("POSTGRESQL_PORT")
|
||||||
if len(dbPortStr) == 0 {
|
if len(dbPortStr) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_PORT is not set")
|
log.Fatalf("environment variable POSTGRESQL_PORT is not set")
|
||||||
}
|
}
|
||||||
dbPort, err := strconv.Atoi(dbPortStr)
|
dbPort, err := strconv.Atoi(dbPortStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("invalid MYSQL_PORT: %v", err)
|
log.Fatalf("invalid POSTGRESQL_PORT: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dbPassword := os.Getenv("MYSQL_PWD")
|
dbPassword := os.Getenv("POSTGRESQL_PWD")
|
||||||
dbDatabase := os.Getenv("MYSQL_DATABASE")
|
dbDatabase := os.Getenv("POSTGRESQL_DATABASE")
|
||||||
if len(dbDatabase) == 0 {
|
if len(dbDatabase) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_DATABASE is not set")
|
log.Fatalf("environment variable POSTGRESQL_DATABASE is not set")
|
||||||
}
|
}
|
||||||
|
|
||||||
database := &models.Database{
|
database := &models.Database{
|
||||||
Type: "mysql",
|
Type: "postgresql",
|
||||||
MySQL: &models.MySQL{
|
PostGreSQL: &models.PostGreSQL{
|
||||||
Host: dbHost,
|
Host: dbHost,
|
||||||
Port: dbPort,
|
Port: dbPort,
|
||||||
Username: dbUser,
|
Username: dbUser,
|
||||||
@ -61,23 +69,7 @@ func PrepareTestForMySQL() {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
|
log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
|
||||||
initDatabaseForTest(database)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrepareTestForSQLite is for test only.
|
|
||||||
func PrepareTestForSQLite() {
|
|
||||||
file := os.Getenv("SQLITE_FILE")
|
|
||||||
if len(file) == 0 {
|
|
||||||
log.Fatalf("environment variable SQLITE_FILE is not set")
|
|
||||||
}
|
|
||||||
|
|
||||||
database := &models.Database{
|
|
||||||
Type: "sqlite",
|
|
||||||
SQLite: &models.SQLite{
|
|
||||||
File: file,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
initDatabaseForTest(database)
|
initDatabaseForTest(database)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -34,8 +34,8 @@ func GetUser(query models.User) (*models.User, error) {
|
|||||||
|
|
||||||
sql := `select user_id, username, email, realname, comment, reset_uuid, salt,
|
sql := `select user_id, username, email, realname, comment, reset_uuid, salt,
|
||||||
sysadmin_flag, creation_time, update_time
|
sysadmin_flag, creation_time, update_time
|
||||||
from user u
|
from harbor_user u
|
||||||
where deleted = 0 `
|
where deleted = false `
|
||||||
queryParam := make([]interface{}, 1)
|
queryParam := make([]interface{}, 1)
|
||||||
if query.UserID != 0 {
|
if query.UserID != 0 {
|
||||||
sql += ` and user_id = ? `
|
sql += ` and user_id = ? `
|
||||||
@ -79,7 +79,7 @@ func LoginByDb(auth models.AuthModel) (*models.User, error) {
|
|||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
|
|
||||||
var users []models.User
|
var users []models.User
|
||||||
n, err := o.Raw(`select * from user where (username = ? or email = ?) and deleted = 0`,
|
n, err := o.Raw(`select * from harbor_user where (username = ? or email = ?) and deleted = false`,
|
||||||
auth.Principal, auth.Principal).QueryRows(&users)
|
auth.Principal, auth.Principal).QueryRows(&users)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -134,10 +134,10 @@ func userQueryConditions(query *models.UserQuery) orm.QuerySeter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ToggleUserAdminRole gives a user admin role.
|
// ToggleUserAdminRole grants or revokes a user's admin role according to hasAdmin.
|
||||||
func ToggleUserAdminRole(userID, hasAdmin int) error {
|
func ToggleUserAdminRole(userID int, hasAdmin bool) error {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
queryParams := make([]interface{}, 1)
|
queryParams := make([]interface{}, 1)
|
||||||
sql := `update user set sysadmin_flag = ? where user_id = ?`
|
sql := `update harbor_user set sysadmin_flag = ? where user_id = ?`
|
||||||
queryParams = append(queryParams, hasAdmin)
|
queryParams = append(queryParams, hasAdmin)
|
||||||
queryParams = append(queryParams, userID)
|
queryParams = append(queryParams, userID)
|
||||||
r, err := o.Raw(sql, queryParams).Exec()
|
r, err := o.Raw(sql, queryParams).Exec()
|
||||||
@ -164,9 +164,9 @@ func ChangeUserPassword(u models.User, oldPassword ...string) (err error) {
|
|||||||
salt := utils.GenerateRandomString()
|
salt := utils.GenerateRandomString()
|
||||||
if len(oldPassword) == 0 {
|
if len(oldPassword) == 0 {
|
||||||
//In some cases there is no need to check the old password, similar to how Linux password change policies work.
|
//In some cases there is no need to check the old password, similar to how Linux password change policies work.
|
||||||
r, err = o.Raw(`update user set password=?, salt=? where user_id=?`, utils.Encrypt(u.Password, salt), salt, u.UserID).Exec()
|
r, err = o.Raw(`update harbor_user set password=?, salt=? where user_id=?`, utils.Encrypt(u.Password, salt), salt, u.UserID).Exec()
|
||||||
} else {
|
} else {
|
||||||
r, err = o.Raw(`update user set password=?, salt=? where user_id=? and password = ?`, utils.Encrypt(u.Password, salt), salt, u.UserID, utils.Encrypt(oldPassword[0], u.Salt)).Exec()
|
r, err = o.Raw(`update harbor_user set password=?, salt=? where user_id=? and password = ?`, utils.Encrypt(u.Password, salt), salt, u.UserID, utils.Encrypt(oldPassword[0], u.Salt)).Exec()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -186,7 +186,7 @@ func ChangeUserPassword(u models.User, oldPassword ...string) (err error) {
|
|||||||
// ResetUserPassword ...
|
// ResetUserPassword ...
|
||||||
func ResetUserPassword(u models.User) error {
|
func ResetUserPassword(u models.User) error {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
r, err := o.Raw(`update user set password=?, reset_uuid=? where reset_uuid=?`, utils.Encrypt(u.Password, u.Salt), "", u.ResetUUID).Exec()
|
r, err := o.Raw(`update harbor_user set password=?, reset_uuid=? where reset_uuid=?`, utils.Encrypt(u.Password, u.Salt), "", u.ResetUUID).Exec()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -203,7 +203,7 @@ func ResetUserPassword(u models.User) error {
|
|||||||
// UpdateUserResetUUID ...
|
// UpdateUserResetUUID ...
|
||||||
func UpdateUserResetUUID(u models.User) error {
|
func UpdateUserResetUUID(u models.User) error {
|
||||||
o := GetOrmer()
|
o := GetOrmer()
|
||||||
_, err := o.Raw(`update user set reset_uuid=? where email=?`, u.ResetUUID, u.Email).Exec()
|
_, err := o.Raw(`update harbor_user set reset_uuid=? where email=?`, u.ResetUUID, u.Email).Exec()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -218,7 +218,7 @@ func CheckUserPassword(query models.User) (*models.User, error) {
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
sql := `select user_id, username, salt from user where deleted = 0 and username = ? and password = ?`
|
sql := `select user_id, username, salt from harbor_user where deleted = false and username = ? and password = ?`
|
||||||
queryParam := make([]interface{}, 1)
|
queryParam := make([]interface{}, 1)
|
||||||
queryParam = append(queryParam, currentUser.Username)
|
queryParam = append(queryParam, currentUser.Username)
|
||||||
queryParam = append(queryParam, utils.Encrypt(query.Password, currentUser.Salt))
|
queryParam = append(queryParam, utils.Encrypt(query.Password, currentUser.Salt))
|
||||||
@ -251,8 +251,8 @@ func DeleteUser(userID int) error {
|
|||||||
name := fmt.Sprintf("%s#%d", user.Username, user.UserID)
|
name := fmt.Sprintf("%s#%d", user.Username, user.UserID)
|
||||||
email := fmt.Sprintf("%s#%d", user.Email, user.UserID)
|
email := fmt.Sprintf("%s#%d", user.Email, user.UserID)
|
||||||
|
|
||||||
_, err = o.Raw(`update user
|
_, err = o.Raw(`update harbor_user
|
||||||
set deleted = 1, username = ?, email = ?
|
set deleted = true, username = ?, email = ?
|
||||||
where user_id = ?`, name, email, userID).Exec()
|
where user_id = ?`, name, email, userID).Exec()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
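
As a compact illustration of the renamed table and the boolean columns used by the queries above, a hedged sketch of a raw lookup; it assumes the same dao package context (GetOrmer, models) as the surrounding code, and the helper itself is hypothetical.

// findActiveUser looks up a user that has not been soft-deleted,
// using the harbor_user table and the boolean deleted column.
func findActiveUser(username string) (*models.User, error) {
	var users []models.User
	n, err := GetOrmer().Raw(
		`select user_id, username, sysadmin_flag from harbor_user where deleted = false and username = ?`,
		username).QueryRows(&users)
	if err != nil || n == 0 {
		return nil, err
	}
	return &users[0], nil
}
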
|
@ -50,13 +50,13 @@ func TestDeleteUser(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
user := &models.User{}
|
user := &models.User{}
|
||||||
sql := "select * from user where user_id = ?"
|
sql := "select * from harbor_user where user_id = ?"
|
||||||
if err = GetOrmer().Raw(sql, id).
|
if err = GetOrmer().Raw(sql, id).
|
||||||
QueryRow(user); err != nil {
|
QueryRow(user); err != nil {
|
||||||
t.Fatalf("failed to query user: %v", err)
|
t.Fatalf("failed to query user: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if user.Deleted != 1 {
|
if user.Deleted != true {
|
||||||
t.Error("user is not deleted")
|
t.Error("user is not deleted")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -35,10 +35,19 @@ type DatabaseWatchItemDAO struct{}
|
|||||||
|
|
||||||
// Add a WatchItem
|
// Add a WatchItem
|
||||||
func (d *DatabaseWatchItemDAO) Add(item *models.WatchItem) (int64, error) {
|
func (d *DatabaseWatchItemDAO) Add(item *models.WatchItem) (int64, error) {
|
||||||
|
o := GetOrmer()
|
||||||
|
|
||||||
|
var triggerID int64
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
item.CreationTime = now
|
|
||||||
item.UpdateTime = now
|
sql := "insert into replication_immediate_trigger (policy_id, namespace, on_deletion, on_push, creation_time, update_time) values (?, ?, ?, ?, ?, ?) RETURNING id"
|
||||||
return GetOrmer().Insert(item)
|
|
||||||
|
err := o.Raw(sql, item.PolicyID, item.Namespace, item.OnDeletion, item.OnPush, now, now).QueryRow(&triggerID)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return triggerID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteByPolicyID deletes the WatchItem specified by policy ID
|
// DeleteByPolicyID deletes the WatchItem specified by policy ID
|
||||||
@ -51,9 +60,9 @@ func (d *DatabaseWatchItemDAO) DeleteByPolicyID(policyID int64) error {
|
|||||||
func (d *DatabaseWatchItemDAO) Get(namespace, operation string) ([]models.WatchItem, error) {
|
func (d *DatabaseWatchItemDAO) Get(namespace, operation string) ([]models.WatchItem, error) {
|
||||||
qs := GetOrmer().QueryTable(&models.WatchItem{}).Filter("Namespace", namespace)
|
qs := GetOrmer().QueryTable(&models.WatchItem{}).Filter("Namespace", namespace)
|
||||||
if operation == "push" {
|
if operation == "push" {
|
||||||
qs = qs.Filter("OnPush", 1)
|
qs = qs.Filter("OnPush", true)
|
||||||
} else if operation == "delete" {
|
} else if operation == "delete" {
|
||||||
qs = qs.Filter("OnDeletion", 1)
|
qs = qs.Filter("OnDeletion", true)
|
||||||
}
|
}
|
||||||
|
|
||||||
items := []models.WatchItem{}
|
items := []models.WatchItem{}
|
||||||
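
The Add method above trades the ORM's Insert call for a raw INSERT ... RETURNING id, which is the usual way to read back a generated key from PostgreSQL, and the push/delete filters now use booleans. A condensed sketch of the RETURNING pattern, assuming the beego orm and models imports already used in this file:

// addTrigger inserts a replication trigger row and reads the generated
// id back through RETURNING (sketch of the pattern used above).
func addTrigger(o orm.Ormer, item *models.WatchItem) (int64, error) {
	var id int64
	now := time.Now()
	err := o.Raw(`insert into replication_immediate_trigger
		(policy_id, namespace, on_deletion, on_push, creation_time, update_time)
		values (?, ?, ?, ?, ?, ?) RETURNING id`,
		item.PolicyID, item.Namespace, item.OnDeletion, item.OnPush, now, now).QueryRow(&id)
	if err != nil {
		return 0, err
	}
	return id, nil
}
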
|
@ -25,7 +25,7 @@ type AccessLog struct {
|
|||||||
ProjectID int64 `orm:"column(project_id)" json:"project_id"`
|
ProjectID int64 `orm:"column(project_id)" json:"project_id"`
|
||||||
RepoName string `orm:"column(repo_name)" json:"repo_name"`
|
RepoName string `orm:"column(repo_name)" json:"repo_name"`
|
||||||
RepoTag string `orm:"column(repo_tag)" json:"repo_tag"`
|
RepoTag string `orm:"column(repo_tag)" json:"repo_tag"`
|
||||||
GUID string `orm:"column(GUID)" json:"guid"`
|
GUID string `orm:"column(guid)" json:"guid"`
|
||||||
Operation string `orm:"column(operation)" json:"operation"`
|
Operation string `orm:"column(operation)" json:"operation"`
|
||||||
OpTime time.Time `orm:"column(op_time)" json:"op_time"`
|
OpTime time.Time `orm:"column(op_time)" json:"op_time"`
|
||||||
}
|
}
|
||||||
|
@ -25,9 +25,8 @@ type Authentication struct {
|
|||||||
|
|
||||||
// Database ...
|
// Database ...
|
||||||
type Database struct {
|
type Database struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
MySQL *MySQL `json:"mysql,omitempty"`
|
PostGreSQL *PostGreSQL `json:"postgresql,omitempty"`
|
||||||
SQLite *SQLite `json:"sqlite,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// MySQL ...
|
// MySQL ...
|
||||||
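
The Database model drops the MySQL member in favour of PostGreSQL. The PostGreSQL struct itself is outside this hunk, so the shape below is an assumption inferred from the fields assigned elsewhere in this diff (Host, Port, Username, Password, Database); the JSON tags are illustrative only.

// PostGreSQL holds the connection settings referenced by this change
// (sketch, not the committed definition).
type PostGreSQL struct {
	Host     string `json:"host"`
	Port     int    `json:"port"`
	Username string `json:"username"`
	Password string `json:"password,omitempty"`
	Database string `json:"database"`
}
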
|
@ -40,5 +40,5 @@ type ProjectMetadata struct {
|
|||||||
Value string `orm:"column(value)" json:"value"`
|
Value string `orm:"column(value)" json:"value"`
|
||||||
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
|
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
|
||||||
UpdateTime time.Time `orm:"column(update_time)" json:"update_time"`
|
UpdateTime time.Time `orm:"column(update_time)" json:"update_time"`
|
||||||
Deleted int `orm:"column(deleted)" json:"deleted"`
|
Deleted bool `orm:"column(deleted)" json:"deleted"`
|
||||||
}
|
}
|
||||||
|
@ -29,7 +29,7 @@ type Project struct {
|
|||||||
Name string `orm:"column(name)" json:"name"`
|
Name string `orm:"column(name)" json:"name"`
|
||||||
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
|
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
|
||||||
UpdateTime time.Time `orm:"column(update_time)" json:"update_time"`
|
UpdateTime time.Time `orm:"column(update_time)" json:"update_time"`
|
||||||
Deleted int `orm:"column(deleted)" json:"deleted"`
|
Deleted bool `orm:"column(deleted)" json:"deleted"`
|
||||||
OwnerName string `orm:"-" json:"owner_name"`
|
OwnerName string `orm:"-" json:"owner_name"`
|
||||||
Togglable bool `orm:"-" json:"togglable"`
|
Togglable bool `orm:"-" json:"togglable"`
|
||||||
Role int `orm:"-" json:"current_user_role_id"`
|
Role int `orm:"-" json:"current_user_role_id"`
|
||||||
|
@ -48,7 +48,7 @@ type RepPolicy struct {
|
|||||||
ReplicateDeletion bool `orm:"column(replicate_deletion)"`
|
ReplicateDeletion bool `orm:"column(replicate_deletion)"`
|
||||||
CreationTime time.Time `orm:"column(creation_time);auto_now_add"`
|
CreationTime time.Time `orm:"column(creation_time);auto_now_add"`
|
||||||
UpdateTime time.Time `orm:"column(update_time);auto_now"`
|
UpdateTime time.Time `orm:"column(update_time);auto_now"`
|
||||||
Deleted int `orm:"column(deleted)"`
|
Deleted bool `orm:"column(deleted)"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RepJob is the model for a replication job, which is the execution unit on job service, currently it is used to transfer/remove
|
// RepJob is the model for a replication job, which is the execution unit on job service, currently it is used to transfer/remove
|
||||||
|
@ -19,7 +19,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// UserTable is the name of table in DB that holds the user object
|
// UserTable is the name of table in DB that holds the user object
|
||||||
const UserTable = "user"
|
const UserTable = "harbor_user"
|
||||||
|
|
||||||
// User holds the details of a user.
|
// User holds the details of a user.
|
||||||
type User struct {
|
type User struct {
|
||||||
@ -29,13 +29,13 @@ type User struct {
|
|||||||
Password string `orm:"column(password)" json:"password"`
|
Password string `orm:"column(password)" json:"password"`
|
||||||
Realname string `orm:"column(realname)" json:"realname"`
|
Realname string `orm:"column(realname)" json:"realname"`
|
||||||
Comment string `orm:"column(comment)" json:"comment"`
|
Comment string `orm:"column(comment)" json:"comment"`
|
||||||
Deleted int `orm:"column(deleted)" json:"deleted"`
|
Deleted bool `orm:"column(deleted)" json:"deleted"`
|
||||||
Rolename string `orm:"-" json:"role_name"`
|
Rolename string `orm:"-" json:"role_name"`
|
||||||
//if this field is named as "RoleID", beego orm can not map role_id
|
//if this field is named as "RoleID", beego orm can not map role_id
|
||||||
//to it.
|
//to it.
|
||||||
Role int `orm:"-" json:"role_id"`
|
Role int `orm:"-" json:"role_id"`
|
||||||
// RoleList []Role `json:"role_list"`
|
// RoleList []Role `json:"role_list"`
|
||||||
HasAdminRole int `orm:"column(sysadmin_flag)" json:"has_admin_role"`
|
HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"`
|
||||||
ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"`
|
ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"`
|
||||||
Salt string `orm:"column(salt)" json:"-"`
|
Salt string `orm:"column(salt)" json:"-"`
|
||||||
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
|
CreationTime time.Time `orm:"column(creation_time)" json:"creation_time"`
|
||||||
|
@ -56,7 +56,7 @@ func (s *SecurityContext) IsSysAdmin() bool {
|
|||||||
if !s.IsAuthenticated() {
|
if !s.IsAuthenticated() {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return s.user.HasAdminRole == 1
|
return s.user.HasAdminRole
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsSolutionUser ...
|
// IsSolutionUser ...
|
||||||
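
With HasAdminRole changed to a bool, the admin check above can return the field directly instead of comparing against 1. A small usage fragment, modelled on the tests further down and assuming their package context:

// An authenticated user with the admin flag set reports as system admin.
ctx := NewSecurityContext(&models.User{
	Username:     "admin",
	HasAdminRole: true,
}, nil)
isAdmin := ctx.IsSysAdmin() // true
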
|
@ -53,32 +53,32 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
dbHost := os.Getenv("MYSQL_HOST")
|
dbHost := os.Getenv("POSTGRESQL_HOST")
|
||||||
if len(dbHost) == 0 {
|
if len(dbHost) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_HOST is not set")
|
log.Fatalf("environment variable POSTGRES_HOST is not set")
|
||||||
}
|
}
|
||||||
dbPortStr := os.Getenv("MYSQL_PORT")
|
dbUser := os.Getenv("POSTGRESQL_USR")
|
||||||
|
if len(dbUser) == 0 {
|
||||||
|
log.Fatalf("environment variable POSTGRES_USR is not set")
|
||||||
|
}
|
||||||
|
dbPortStr := os.Getenv("POSTGRESQL_PORT")
|
||||||
if len(dbPortStr) == 0 {
|
if len(dbPortStr) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_PORT is not set")
|
log.Fatalf("environment variable POSTGRES_PORT is not set")
|
||||||
}
|
}
|
||||||
dbPort, err := strconv.Atoi(dbPortStr)
|
dbPort, err := strconv.Atoi(dbPortStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("invalid MYSQL_PORT: %v", err)
|
log.Fatalf("invalid POSTGRESQL_PORT: %v", err)
|
||||||
}
|
|
||||||
dbUser := os.Getenv("MYSQL_USR")
|
|
||||||
if len(dbUser) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_USR is not set")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dbPassword := os.Getenv("MYSQL_PWD")
|
dbPassword := os.Getenv("POSTGRESQL_PWD")
|
||||||
dbDatabase := os.Getenv("MYSQL_DATABASE")
|
dbDatabase := os.Getenv("POSTGRESQL_DATABASE")
|
||||||
if len(dbDatabase) == 0 {
|
if len(dbDatabase) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_DATABASE is not set")
|
log.Fatalf("environment variable POSTGRESQL_DATABASE is not set")
|
||||||
}
|
}
|
||||||
|
|
||||||
database := &models.Database{
|
database := &models.Database{
|
||||||
Type: "mysql",
|
Type: "postgresql",
|
||||||
MySQL: &models.MySQL{
|
PostGreSQL: &models.PostGreSQL{
|
||||||
Host: dbHost,
|
Host: dbHost,
|
||||||
Port: dbPort,
|
Port: dbPort,
|
||||||
Username: dbUser,
|
Username: dbUser,
|
||||||
@ -87,7 +87,7 @@ func TestMain(m *testing.M) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
|
log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
|
||||||
|
|
||||||
if err := dao.InitDatabase(database); err != nil {
|
if err := dao.InitDatabase(database); err != nil {
|
||||||
log.Fatalf("failed to initialize database: %v", err)
|
log.Fatalf("failed to initialize database: %v", err)
|
||||||
@ -197,7 +197,7 @@ func TestIsSysAdmin(t *testing.T) {
|
|||||||
// authenticated, admin
|
// authenticated, admin
|
||||||
ctx = NewSecurityContext(&models.User{
|
ctx = NewSecurityContext(&models.User{
|
||||||
Username: "test",
|
Username: "test",
|
||||||
HasAdminRole: 1,
|
HasAdminRole: true,
|
||||||
}, nil)
|
}, nil)
|
||||||
assert.True(t, ctx.IsSysAdmin())
|
assert.True(t, ctx.IsSysAdmin())
|
||||||
}
|
}
|
||||||
@ -229,7 +229,7 @@ func TestHasReadPerm(t *testing.T) {
|
|||||||
// private project, authenticated, system admin
|
// private project, authenticated, system admin
|
||||||
ctx = NewSecurityContext(&models.User{
|
ctx = NewSecurityContext(&models.User{
|
||||||
Username: "admin",
|
Username: "admin",
|
||||||
HasAdminRole: 1,
|
HasAdminRole: true,
|
||||||
}, pm)
|
}, pm)
|
||||||
assert.True(t, ctx.HasReadPerm(private.Name))
|
assert.True(t, ctx.HasReadPerm(private.Name))
|
||||||
}
|
}
|
||||||
@ -250,7 +250,7 @@ func TestHasWritePerm(t *testing.T) {
|
|||||||
// authenticated, system admin
|
// authenticated, system admin
|
||||||
ctx = NewSecurityContext(&models.User{
|
ctx = NewSecurityContext(&models.User{
|
||||||
Username: "admin",
|
Username: "admin",
|
||||||
HasAdminRole: 1,
|
HasAdminRole: true,
|
||||||
}, pm)
|
}, pm)
|
||||||
assert.True(t, ctx.HasReadPerm(private.Name))
|
assert.True(t, ctx.HasReadPerm(private.Name))
|
||||||
}
|
}
|
||||||
@ -267,7 +267,7 @@ func TestHasAllPerm(t *testing.T) {
|
|||||||
// authenticated, system admin
|
// authenticated, system admin
|
||||||
ctx = NewSecurityContext(&models.User{
|
ctx = NewSecurityContext(&models.User{
|
||||||
Username: "admin",
|
Username: "admin",
|
||||||
HasAdminRole: 1,
|
HasAdminRole: true,
|
||||||
}, pm)
|
}, pm)
|
||||||
assert.True(t, ctx.HasAllPerm(private.Name))
|
assert.True(t, ctx.HasAllPerm(private.Name))
|
||||||
}
|
}
|
||||||
|
@ -28,15 +28,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var adminServerLdapTestConfig = map[string]interface{}{
|
var adminServerLdapTestConfig = map[string]interface{}{
|
||||||
common.ExtEndpoint: "host01.com",
|
common.ExtEndpoint: "host01.com",
|
||||||
common.AUTHMode: "ldap_auth",
|
common.AUTHMode: "ldap_auth",
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.MySQLPassword: "root123",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
|
||||||
//config.SelfRegistration: true,
|
//config.SelfRegistration: true,
|
||||||
common.LDAPURL: "ldap://127.0.0.1",
|
common.LDAPURL: "ldap://127.0.0.1",
|
||||||
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
||||||
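
All of the adminserver test fixtures in this change swap the MySQL keys for the PostgreSQL ones and drop common.SQLiteFile. The database portion of such a map, trimmed down and using only constants that appear in this diff, looks roughly like this:

var dbTestConfig = map[string]interface{}{
	common.DatabaseType:       "postgresql",
	common.PostGreSQLHOST:     "127.0.0.1",
	common.PostGreSQLPort:     5432,
	common.PostGreSQLUsername: "postgres",
	common.PostGreSQLPassword: "root123",
	common.PostGreSQLDatabase: "registry",
}
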
@ -53,13 +52,12 @@ var adminServerLdapTestConfig = map[string]interface{}{
|
|||||||
var adminServerDefaultConfigWithVerifyCert = map[string]interface{}{
|
var adminServerDefaultConfigWithVerifyCert = map[string]interface{}{
|
||||||
common.ExtEndpoint: "https://host01.com",
|
common.ExtEndpoint: "https://host01.com",
|
||||||
common.AUTHMode: common.LDAPAuth,
|
common.AUTHMode: common.LDAPAuth,
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.MySQLPassword: "root123",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
|
||||||
common.SelfRegistration: true,
|
common.SelfRegistration: true,
|
||||||
common.LDAPURL: "ldap://127.0.0.1:389",
|
common.LDAPURL: "ldap://127.0.0.1:389",
|
||||||
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
||||||
|
@ -26,13 +26,12 @@ import (
|
|||||||
var adminServerDefaultConfig = map[string]interface{}{
|
var adminServerDefaultConfig = map[string]interface{}{
|
||||||
common.ExtEndpoint: "https://host01.com",
|
common.ExtEndpoint: "https://host01.com",
|
||||||
common.AUTHMode: common.DBAuth,
|
common.AUTHMode: common.DBAuth,
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLUsername: "user01",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.MySQLPassword: "password",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
|
||||||
common.SelfRegistration: true,
|
common.SelfRegistration: true,
|
||||||
common.LDAPURL: "ldap://127.0.0.1",
|
common.LDAPURL: "ldap://127.0.0.1",
|
||||||
common.LDAPSearchDN: "uid=searchuser,ou=people,dc=mydomain,dc=com",
|
common.LDAPSearchDN: "uid=searchuser,ou=people,dc=mydomain,dc=com",
|
||||||
@ -65,10 +64,10 @@ var adminServerDefaultConfig = map[string]interface{}{
|
|||||||
common.WithNotary: false,
|
common.WithNotary: false,
|
||||||
common.WithClair: false,
|
common.WithClair: false,
|
||||||
common.ClairDBUsername: "postgres",
|
common.ClairDBUsername: "postgres",
|
||||||
common.ClairDBHost: "postgres",
|
common.ClairDBHost: "postgresql",
|
||||||
common.ClairDB: "postgres",
|
common.ClairDB: "postgres",
|
||||||
common.ClairDBPort: 5432,
|
common.ClairDBPort: 5432,
|
||||||
common.ClairDBPassword: "password",
|
common.ClairDBPassword: "root123",
|
||||||
common.UAAClientID: "testid",
|
common.UAAClientID: "testid",
|
||||||
common.UAAClientSecret: "testsecret",
|
common.UAAClientSecret: "testsecret",
|
||||||
common.UAAEndpoint: "10.192.168.5",
|
common.UAAEndpoint: "10.192.168.5",
|
||||||
|
@ -25,32 +25,32 @@ import (
|
|||||||
|
|
||||||
// InitDatabaseFromEnv is used to initialize database for testing
|
// InitDatabaseFromEnv is used to initialize database for testing
|
||||||
func InitDatabaseFromEnv() {
|
func InitDatabaseFromEnv() {
|
||||||
dbHost := os.Getenv("MYSQL_HOST")
|
dbHost := os.Getenv("POSTGRESQL_HOST")
|
||||||
if len(dbHost) == 0 {
|
if len(dbHost) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_HOST is not set")
|
log.Fatalf("environment variable POSTGRESQL_HOST is not set")
|
||||||
}
|
}
|
||||||
dbPortStr := os.Getenv("MYSQL_PORT")
|
dbUser := os.Getenv("POSTGRESQL_USR")
|
||||||
|
if len(dbUser) == 0 {
|
||||||
|
log.Fatalf("environment variable POSTGRESQL_USR is not set")
|
||||||
|
}
|
||||||
|
dbPortStr := os.Getenv("POSTGRESQL_PORT")
|
||||||
if len(dbPortStr) == 0 {
|
if len(dbPortStr) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_PORT is not set")
|
log.Fatalf("environment variable POSTGRESQL_PORT is not set")
|
||||||
}
|
}
|
||||||
dbPort, err := strconv.Atoi(dbPortStr)
|
dbPort, err := strconv.Atoi(dbPortStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("invalid MYSQL_PORT: %v", err)
|
log.Fatalf("invalid POSTGRESQL_PORT: %v", err)
|
||||||
}
|
|
||||||
dbUser := os.Getenv("MYSQL_USR")
|
|
||||||
if len(dbUser) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_USR is not set")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dbPassword := os.Getenv("MYSQL_PWD")
|
dbPassword := os.Getenv("POSTGRESQL_PWD")
|
||||||
dbDatabase := os.Getenv("MYSQL_DATABASE")
|
dbDatabase := os.Getenv("POSTGRESQL_DATABASE")
|
||||||
if len(dbDatabase) == 0 {
|
if len(dbDatabase) == 0 {
|
||||||
log.Fatalf("environment variable MYSQL_DATABASE is not set")
|
log.Fatalf("environment variable POSTGRESQL_DATABASE is not set")
|
||||||
}
|
}
|
||||||
|
|
||||||
database := &models.Database{
|
database := &models.Database{
|
||||||
Type: "mysql",
|
Type: "postgresql",
|
||||||
MySQL: &models.MySQL{
|
PostGreSQL: &models.PostGreSQL{
|
||||||
Host: dbHost,
|
Host: dbHost,
|
||||||
Port: dbPort,
|
Port: dbPort,
|
||||||
Username: dbUser,
|
Username: dbUser,
|
||||||
@ -59,7 +59,7 @@ func InitDatabaseFromEnv() {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
|
log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
|
||||||
|
|
||||||
if err := dao.InitDatabase(database); err != nil {
|
if err := dao.InitDatabase(database); err != nil {
|
||||||
log.Fatalf("failed to initialize database: %v", err)
|
log.Fatalf("failed to initialize database: %v", err)
|
||||||
|
@ -158,13 +158,13 @@ func (c *Context) GetLogger() logger.Interface {
|
|||||||
func getDBFromConfig(cfg map[string]interface{}) *models.Database {
|
func getDBFromConfig(cfg map[string]interface{}) *models.Database {
|
||||||
database := &models.Database{}
|
database := &models.Database{}
|
||||||
database.Type = cfg[common.DatabaseType].(string)
|
database.Type = cfg[common.DatabaseType].(string)
|
||||||
mysql := &models.MySQL{}
|
postgresql := &models.PostGreSQL{}
|
||||||
mysql.Host = cfg[common.MySQLHost].(string)
|
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
|
||||||
mysql.Port = int(cfg[common.MySQLPort].(float64))
|
postgresql.Port = int(cfg[common.PostGreSQLPort].(float64))
|
||||||
mysql.Username = cfg[common.MySQLUsername].(string)
|
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
|
||||||
mysql.Password = cfg[common.MySQLPassword].(string)
|
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
|
||||||
mysql.Database = cfg[common.MySQLDatabase].(string)
|
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
|
||||||
database.MySQL = mysql
|
database.PostGreSQL = postgresql
|
||||||
|
|
||||||
return database
|
return database
|
||||||
}
|
}
|
||||||
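
For context, the config-to-model mapping above in condensed form; the Port value arrives as a float64 after JSON decoding, hence the cast chain. The function name is hypothetical, while the casts and constants are the ones shown in this diff.

// getDatabase builds a models.Database from a decoded configuration map.
func getDatabase(cfg map[string]interface{}) *models.Database {
	return &models.Database{
		Type: cfg[common.DatabaseType].(string),
		PostGreSQL: &models.PostGreSQL{
			Host:     cfg[common.PostGreSQLHOST].(string),
			Port:     int(cfg[common.PostGreSQLPort].(float64)),
			Username: cfg[common.PostGreSQLUsername].(string),
			Password: cfg[common.PostGreSQLPassword].(string),
			Database: cfg[common.PostGreSQLDatabase].(string),
		},
	}
}
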
|
@ -307,7 +307,7 @@ func validateCfg(c map[string]interface{}) (bool, error) {
|
|||||||
return false, fmt.Errorf("invalid %s: %d", k, n)
|
return false, fmt.Errorf("invalid %s: %d", k, n)
|
||||||
}
|
}
|
||||||
if (k == common.EmailPort ||
|
if (k == common.EmailPort ||
|
||||||
k == common.MySQLPort) && n > 65535 {
|
k == common.PostGreSQLPort) && n > 65535 {
|
||||||
return false, fmt.Errorf("invalid %s: %d", k, n)
|
return false, fmt.Errorf("invalid %s: %d", k, n)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -919,13 +919,13 @@ func (a testapi) UsersPut(userID int, profile apilib.UserProfile, authInfo usrIn
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Update a registered user to be an administrator of Harbor.
|
//Update a registered user to be an administrator of Harbor.
|
||||||
func (a testapi) UsersToggleAdminRole(userID int, authInfo usrInfo, hasAdminRole int32) (int, error) {
|
func (a testapi) UsersToggleAdminRole(userID int, authInfo usrInfo, hasAdminRole bool) (int, error) {
|
||||||
_sling := sling.New().Put(a.basePath)
|
_sling := sling.New().Put(a.basePath)
|
||||||
// create path and map variables
|
// create path and map variables
|
||||||
path := "/api/users/" + fmt.Sprintf("%d", userID) + "/sysadmin"
|
path := "/api/users/" + fmt.Sprintf("%d", userID) + "/sysadmin"
|
||||||
_sling = _sling.Path(path)
|
_sling = _sling.Path(path)
|
||||||
type QueryParams struct {
|
type QueryParams struct {
|
||||||
HasAdminRole int32 `json:"has_admin_role,omitempty"`
|
HasAdminRole bool `json:"has_admin_role,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
_sling = _sling.BodyJSON(&QueryParams{HasAdminRole: hasAdminRole})
|
_sling = _sling.BodyJSON(&QueryParams{HasAdminRole: hasAdminRole})
|
||||||
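
One subtlety in the helper above: with `json:"has_admin_role,omitempty"` on a bool field, encoding/json drops the field entirely when the value is false, so a request meant to revoke the admin role would carry an empty JSON object. A standard-library sketch of that behaviour:

package main

import (
	"encoding/json"
	"fmt"
)

type queryParams struct {
	HasAdminRole bool `json:"has_admin_role,omitempty"`
}

func main() {
	t, _ := json.Marshal(queryParams{HasAdminRole: true})
	f, _ := json.Marshal(queryParams{HasAdminRole: false})
	fmt.Println(string(t)) // {"has_admin_role":true}
	fmt.Println(string(f)) // {} (false is dropped because of omitempty)
}
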
|
@ -122,6 +122,10 @@ func (pma *ProjectMemberAPI) Post() {
|
|||||||
var request models.MemberReq
|
var request models.MemberReq
|
||||||
pma.DecodeJSONReq(&request)
|
pma.DecodeJSONReq(&request)
|
||||||
pmid, err := AddOrUpdateProjectMember(projectID, request)
|
pmid, err := AddOrUpdateProjectMember(projectID, request)
|
||||||
|
if err == auth.ErrorGroupNotExist || err == auth.ErrorUserNotExist {
|
||||||
|
pma.HandleNotFound(fmt.Sprintf("Failed to add project member, error: %v", err))
|
||||||
|
return
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
pma.HandleInternalServerError(fmt.Sprintf("Failed to add project member, error: %v", err))
|
pma.HandleInternalServerError(fmt.Sprintf("Failed to add project member, error: %v", err))
|
||||||
return
|
return
|
||||||
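
The handler above now separates "member not found" failures from internal errors by comparing against the sentinel errors that the auth package exports later in this diff (ErrorUserNotExist, ErrorGroupNotExist). A minimal sketch of the same mapping with hypothetical plumbing; it assumes the auth import the handler already uses and plain net/http for brevity:

// respondForMemberError maps the auth sentinel errors to 404 and
// everything else to 500 (hypothetical helper mirroring the handler above).
func respondForMemberError(w http.ResponseWriter, err error) {
	msg := fmt.Sprintf("Failed to add project member, error: %v", err)
	if err == auth.ErrorUserNotExist || err == auth.ErrorGroupNotExist {
		http.Error(w, msg, http.StatusNotFound)
		return
	}
	http.Error(w, msg, http.StatusInternalServerError)
}
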
|
@ -114,6 +114,20 @@ func TestProjectMemberAPI_Post(t *testing.T) {
|
|||||||
},
|
},
|
||||||
code: http.StatusCreated,
|
code: http.StatusCreated,
|
||||||
},
|
},
|
||||||
|
&codeCheckingCase{
|
||||||
|
request: &testingRequest{
|
||||||
|
method: http.MethodPost,
|
||||||
|
url: "/api/projects/1/members",
|
||||||
|
bodyJSON: &models.MemberReq{
|
||||||
|
Role: 1,
|
||||||
|
MemberUser: models.User{
|
||||||
|
Username: "notexistuser",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
credential: admin,
|
||||||
|
},
|
||||||
|
code: http.StatusNotFound,
|
||||||
|
},
|
||||||
&codeCheckingCase{
|
&codeCheckingCase{
|
||||||
request: &testingRequest{
|
request: &testingRequest{
|
||||||
method: http.MethodPost,
|
method: http.MethodPost,
|
||||||
|
@ -252,6 +252,8 @@ func (pa *RepPolicyAPI) Delete() {
|
|||||||
count, err := dao.GetTotalCountOfRepJobs(&models.RepJobQuery{
|
count, err := dao.GetTotalCountOfRepJobs(&models.RepJobQuery{
|
||||||
PolicyID: id,
|
PolicyID: id,
|
||||||
Statuses: []string{models.JobRunning, models.JobRetrying, models.JobPending},
|
Statuses: []string{models.JobRunning, models.JobRetrying, models.JobPending},
|
||||||
|
// only get the transfer and delete jobs, do not get schedule job
|
||||||
|
Operations: []string{models.RepOpTransfer, models.RepOpDelete},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to filter jobs of policy %d: %v", id, err)
|
log.Errorf("failed to filter jobs of policy %d: %v", id, err)
|
||||||
|
@ -15,10 +15,11 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/vmware/harbor/src/common/api"
|
"github.com/vmware/harbor/src/common/api"
|
||||||
"github.com/vmware/harbor/tests/apitests/apilib"
|
"github.com/vmware/harbor/tests/apitests/apilib"
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/astaxie/beego"
|
"github.com/astaxie/beego"
|
||||||
)
|
)
|
||||||
@ -306,7 +307,7 @@ func TestUsersToggleAdminRole(t *testing.T) {
|
|||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
apiTest := newHarborAPI()
|
apiTest := newHarborAPI()
|
||||||
//case 1: toggle user2 admin role without admin auth
|
//case 1: toggle user2 admin role without admin auth
|
||||||
code, err := apiTest.UsersToggleAdminRole(testUser0002ID, *testUser0002Auth, int32(1))
|
code, err := apiTest.UsersToggleAdminRole(testUser0002ID, *testUser0002Auth, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error("Error occurred while toggling user admin role", err.Error())
|
t.Error("Error occurred while toggling user admin role", err.Error())
|
||||||
t.Log(err)
|
t.Log(err)
|
||||||
@ -314,7 +315,7 @@ func TestUsersToggleAdminRole(t *testing.T) {
|
|||||||
assert.Equal(403, code, "Toggle user admin role status should be 403")
|
assert.Equal(403, code, "Toggle user admin role status should be 403")
|
||||||
}
|
}
|
||||||
//case 2: toggle user2 admin role with admin auth
|
//case 2: toggle user2 admin role with admin auth
|
||||||
code, err = apiTest.UsersToggleAdminRole(testUser0002ID, *admin, int32(1))
|
code, err = apiTest.UsersToggleAdminRole(testUser0002ID, *admin, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error("Error occurred while toggling user admin role", err.Error())
|
t.Error("Error occurred while toggling user admin role", err.Error())
|
||||||
t.Log(err)
|
t.Log(err)
|
||||||
|
@ -27,13 +27,12 @@ var l = NewUserLock(2 * time.Second)
|
|||||||
var adminServerLdapTestConfig = map[string]interface{}{
|
var adminServerLdapTestConfig = map[string]interface{}{
|
||||||
common.ExtEndpoint: "host01.com",
|
common.ExtEndpoint: "host01.com",
|
||||||
common.AUTHMode: "ldap_auth",
|
common.AUTHMode: "ldap_auth",
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.MySQLPassword: "root123",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
|
||||||
common.LDAPURL: "ldap://127.0.0.1",
|
common.LDAPURL: "ldap://127.0.0.1",
|
||||||
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
||||||
common.LDAPSearchPwd: "admin",
|
common.LDAPSearchPwd: "admin",
|
||||||
|
@ -31,6 +31,12 @@ const frozenTime time.Duration = 1500 * time.Millisecond
|
|||||||
|
|
||||||
var lock = NewUserLock(frozenTime)
|
var lock = NewUserLock(frozenTime)
|
||||||
|
|
||||||
|
// ErrorUserNotExist ...
|
||||||
|
var ErrorUserNotExist = errors.New("User does not exist")
|
||||||
|
|
||||||
|
// ErrorGroupNotExist ...
|
||||||
|
var ErrorGroupNotExist = errors.New("Group does not exist")
|
||||||
|
|
||||||
//ErrAuth is the type of error to indicate a failed authentication due to user's error.
|
//ErrAuth is the type of error to indicate a failed authentication due to user's error.
|
||||||
type ErrAuth struct {
|
type ErrAuth struct {
|
||||||
details string
|
details string
|
||||||
@ -200,6 +206,9 @@ func SearchGroup(groupKey string) (*models.UserGroup, error) {
|
|||||||
// SearchAndOnBoardUser ... searches for the user and onboards them; if the user already exists, it returns the ID of the current user.
|
// SearchAndOnBoardUser ... searches for the user and onboards them; if the user already exists, it returns the ID of the current user.
|
||||||
func SearchAndOnBoardUser(username string) (int, error) {
|
func SearchAndOnBoardUser(username string) (int, error) {
|
||||||
user, err := SearchUser(username)
|
user, err := SearchUser(username)
|
||||||
|
if user == nil {
|
||||||
|
return 0, ErrorUserNotExist
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@ -215,6 +224,9 @@ func SearchAndOnBoardUser(username string) (int, error) {
|
|||||||
// SearchAndOnBoardGroup ... if altGroupName is not empty, take the altGroupName as groupName in harbor DB
|
// SearchAndOnBoardGroup ... if altGroupName is not empty, take the altGroupName as groupName in harbor DB
|
||||||
func SearchAndOnBoardGroup(groupKey, altGroupName string) (int, error) {
|
func SearchAndOnBoardGroup(groupKey, altGroupName string) (int, error) {
|
||||||
userGroup, err := SearchGroup(groupKey)
|
userGroup, err := SearchGroup(groupKey)
|
||||||
|
if userGroup == nil {
|
||||||
|
return 0, ErrorGroupNotExist
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -29,15 +29,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var adminServerTestConfig = map[string]interface{}{
|
var adminServerTestConfig = map[string]interface{}{
|
||||||
common.ExtEndpoint: "host01.com",
|
common.ExtEndpoint: "host01.com",
|
||||||
common.AUTHMode: "db_auth",
|
common.AUTHMode: "db_auth",
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.MySQLPassword: "root123",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
|
||||||
//config.SelfRegistration: true,
|
//config.SelfRegistration: true,
|
||||||
common.LDAPURL: "ldap://127.0.0.1",
|
common.LDAPURL: "ldap://127.0.0.1",
|
||||||
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
||||||
|
@ -32,15 +32,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var adminServerLdapTestConfig = map[string]interface{}{
|
var adminServerLdapTestConfig = map[string]interface{}{
|
||||||
common.ExtEndpoint: "host01.com",
|
common.ExtEndpoint: "host01.com",
|
||||||
common.AUTHMode: "ldap_auth",
|
common.AUTHMode: "ldap_auth",
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.MySQLPassword: "root123",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
|
||||||
//config.SelfRegistration: true,
|
//config.SelfRegistration: true,
|
||||||
common.LDAPURL: "ldap://127.0.0.1",
|
common.LDAPURL: "ldap://127.0.0.1",
|
||||||
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
||||||
@ -113,14 +112,14 @@ func TestMain(m *testing.M) {
|
|||||||
"insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
"insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
||||||
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
||||||
"insert into user_group (group_name, group_type, group_property) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
|
"insert into user_group (group_name, group_type, group_property) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
|
||||||
"update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
|
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
|
||||||
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
|
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
|
||||||
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
|
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
|
||||||
}
|
}
|
||||||
|
|
||||||
clearSqls := []string{
|
clearSqls := []string{
|
||||||
"delete from project where name='member_test_01'",
|
"delete from project where name='member_test_01'",
|
||||||
"delete from user where username='member_test_01' or username='pm_sample'",
|
"delete from harbor_user where username='member_test_01' or username='pm_sample'",
|
||||||
"delete from user_group",
|
"delete from user_group",
|
||||||
"delete from project_member",
|
"delete from project_member",
|
||||||
}
|
}
|
||||||
|
@ -15,54 +15,20 @@
|
|||||||
package uaa
|
package uaa
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/vmware/harbor/src/common/dao"
|
"github.com/vmware/harbor/src/common/dao"
|
||||||
"github.com/vmware/harbor/src/common/models"
|
"github.com/vmware/harbor/src/common/models"
|
||||||
"github.com/vmware/harbor/src/common/utils/log"
|
"github.com/vmware/harbor/src/common/utils/test"
|
||||||
utilstest "github.com/vmware/harbor/src/common/utils/test"
|
utilstest "github.com/vmware/harbor/src/common/utils/test"
|
||||||
"github.com/vmware/harbor/src/common/utils/uaa"
|
"github.com/vmware/harbor/src/common/utils/uaa"
|
||||||
"github.com/vmware/harbor/src/ui/config"
|
"github.com/vmware/harbor/src/ui/config"
|
||||||
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"testing"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
dbHost := os.Getenv("MYSQL_HOST")
|
test.InitDatabaseFromEnv()
|
||||||
if len(dbHost) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_HOST is not set")
|
|
||||||
}
|
|
||||||
dbUser := os.Getenv("MYSQL_USR")
|
|
||||||
if len(dbUser) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_USR is not set")
|
|
||||||
}
|
|
||||||
dbPortStr := os.Getenv("MYSQL_PORT")
|
|
||||||
if len(dbPortStr) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_PORT is not set")
|
|
||||||
}
|
|
||||||
dbPort, err := strconv.Atoi(dbPortStr)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("invalid MYSQL_PORT: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dbPassword := os.Getenv("MYSQL_PWD")
|
|
||||||
dbDatabase := os.Getenv("MYSQL_DATABASE")
|
|
||||||
if len(dbDatabase) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_DATABASE is not set")
|
|
||||||
}
|
|
||||||
|
|
||||||
database := &models.Database{
|
|
||||||
Type: "mysql",
|
|
||||||
MySQL: &models.MySQL{
|
|
||||||
Host: dbHost,
|
|
||||||
Port: dbPort,
|
|
||||||
Username: dbUser,
|
|
||||||
Password: dbPassword,
|
|
||||||
Database: dbDatabase,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
dao.InitDatabase(database)
|
|
||||||
server, err := utilstest.NewAdminserver(nil)
|
server, err := utilstest.NewAdminserver(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
@ -93,7 +59,7 @@ func TestMain(m *testing.M) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
err = dao.ClearTable("user")
|
err = dao.ClearTable("harbor_user")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
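
Several TestMain functions in this change, including the UAA one above, replace their copied MySQL bootstrap with the shared helper. The resulting shape is roughly the following, assuming the os, testing and utils/test imports shown above:

func TestMain(m *testing.M) {
	// The shared helper reads the POSTGRESQL_* variables and initializes the DAO layer.
	test.InitDatabaseFromEnv()
	os.Exit(m.Run())
}
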
|
@ -395,18 +395,17 @@ func Database() (*models.Database, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
database := &models.Database{}
|
database := &models.Database{}
|
||||||
database.Type = cfg[common.DatabaseType].(string)
|
database.Type = cfg[common.DatabaseType].(string)
|
||||||
mysql := &models.MySQL{}
|
|
||||||
mysql.Host = cfg[common.MySQLHost].(string)
|
postgresql := &models.PostGreSQL{}
|
||||||
mysql.Port = int(cfg[common.MySQLPort].(float64))
|
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
|
||||||
mysql.Username = cfg[common.MySQLUsername].(string)
|
postgresql.Port = int(cfg[common.PostGreSQLPort].(float64))
|
||||||
mysql.Password = cfg[common.MySQLPassword].(string)
|
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
|
||||||
mysql.Database = cfg[common.MySQLDatabase].(string)
|
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
|
||||||
database.MySQL = mysql
|
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
|
||||||
sqlite := &models.SQLite{}
|
database.PostGreSQL = postgresql
|
||||||
sqlite.File = cfg[common.SQLiteFile].(string)
|
|
||||||
database.SQLite = sqlite
|
|
||||||
|
|
||||||
return database, nil
|
return database, nil
|
||||||
}
|
}
|
||||||
|
@ -72,7 +72,7 @@ func (cc *CommonController) Login() {
|
|||||||
|
|
||||||
cc.SetSession("userId", user.UserID)
|
cc.SetSession("userId", user.UserID)
|
||||||
cc.SetSession("username", user.Username)
|
cc.SetSession("username", user.Username)
|
||||||
cc.SetSession("isSysAdmin", user.HasAdminRole == 1)
|
cc.SetSession("isSysAdmin", user.HasAdminRole)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LogOut Harbor UI
|
// LogOut Harbor UI
|
||||||
|
@ -29,18 +29,17 @@ import (
|
|||||||
func TestReadonlyFilter(t *testing.T) {
|
func TestReadonlyFilter(t *testing.T) {
|
||||||
|
|
||||||
var defaultConfig = map[string]interface{}{
|
var defaultConfig = map[string]interface{}{
|
||||||
common.ExtEndpoint: "host01.com",
|
common.ExtEndpoint: "host01.com",
|
||||||
common.AUTHMode: "db_auth",
|
common.AUTHMode: "db_auth",
|
||||||
common.CfgExpiration: 5,
|
common.CfgExpiration: 5,
|
||||||
common.TokenExpiration: 30,
|
common.TokenExpiration: 30,
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLPassword: "root123",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
common.ReadOnly: true,
|
||||||
common.ReadOnly: true,
|
|
||||||
}
|
}
|
||||||
adminServer, err := utilstest.NewAdminserver(defaultConfig)
|
adminServer, err := utilstest.NewAdminserver(defaultConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -234,7 +234,7 @@ func (s *sessionReqCtxModifier) Modify(ctx *beegoctx.Context) bool {
|
|||||||
}
|
}
|
||||||
isSysAdmin := ctx.Input.Session("isSysAdmin")
|
isSysAdmin := ctx.Input.Session("isSysAdmin")
|
||||||
if isSysAdmin != nil && isSysAdmin.(bool) {
|
if isSysAdmin != nil && isSysAdmin.(bool) {
|
||||||
user.HasAdminRole = 1
|
user.HasAdminRole = true
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debug("using local database project manager")
|
log.Debug("using local database project manager")
|
||||||
|
@ -27,7 +27,7 @@ import (
|
|||||||
"github.com/vmware/harbor/src/ui/promgr/pmsdriver"
|
"github.com/vmware/harbor/src/ui/promgr/pmsdriver"
|
||||||
)
|
)
|
||||||
|
|
||||||
const dupProjectPattern = `Duplicate entry '\w+' for key 'name'`
|
const dupProjectPattern = `duplicate key value violates unique constraint \"project_name_key\"`
|
||||||
|
|
||||||
type driver struct {
|
type driver struct {
|
||||||
}
|
}
|
||||||
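
The duplicate-project detection switches from MySQL's "Duplicate entry" message to PostgreSQL's unique-constraint violation text. A hedged, self-contained sketch of matching such an error with the new pattern; the sample message is illustrative only:

package main

import (
	"fmt"
	"regexp"
)

// Pattern as introduced above; \" matches a literal double quote.
const dupProjectPattern = `duplicate key value violates unique constraint \"project_name_key\"`

func main() {
	msg := `pq: duplicate key value violates unique constraint "project_name_key"`
	matched, _ := regexp.MatchString(dupProjectPattern, msg)
	fmt.Println(matched) // true
}
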
|
@ -16,57 +16,16 @@ package local
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/vmware/harbor/src/common/dao"
|
|
||||||
"github.com/vmware/harbor/src/common/models"
|
"github.com/vmware/harbor/src/common/models"
|
||||||
errutil "github.com/vmware/harbor/src/common/utils/error"
|
errutil "github.com/vmware/harbor/src/common/utils/error"
|
||||||
"github.com/vmware/harbor/src/common/utils/log"
|
"github.com/vmware/harbor/src/common/utils/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
dbHost := os.Getenv("MYSQL_HOST")
|
test.InitDatabaseFromEnv()
|
||||||
if len(dbHost) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_HOST is not set")
|
|
||||||
}
|
|
||||||
dbPortStr := os.Getenv("MYSQL_PORT")
|
|
||||||
if len(dbPortStr) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_PORT is not set")
|
|
||||||
}
|
|
||||||
dbPort, err := strconv.Atoi(dbPortStr)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("invalid MYSQL_PORT: %v", err)
|
|
||||||
}
|
|
||||||
dbUser := os.Getenv("MYSQL_USR")
|
|
||||||
if len(dbUser) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_USR is not set")
|
|
||||||
}
|
|
||||||
|
|
||||||
dbPassword := os.Getenv("MYSQL_PWD")
|
|
||||||
dbDatabase := os.Getenv("MYSQL_DATABASE")
|
|
||||||
if len(dbDatabase) == 0 {
|
|
||||||
log.Fatalf("environment variable MYSQL_DATABASE is not set")
|
|
||||||
}
|
|
||||||
|
|
||||||
database := &models.Database{
|
|
||||||
Type: "mysql",
|
|
||||||
MySQL: &models.MySQL{
|
|
||||||
Host: dbHost,
|
|
||||||
Port: dbPort,
|
|
||||||
Username: dbUser,
|
|
||||||
Password: dbPassword,
|
|
||||||
Database: dbDatabase,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("MYSQL_HOST: %s, MYSQL_USR: %s, MYSQL_PORT: %d, MYSQL_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword)
|
|
||||||
|
|
||||||
if err := dao.InitDatabase(database); err != nil {
|
|
||||||
log.Fatalf("failed to initialize database: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
os.Exit(m.Run())
|
os.Exit(m.Run())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -112,17 +112,16 @@ func TestMatchListRepos(t *testing.T) {
|
|||||||
|
|
||||||
func TestPMSPolicyChecker(t *testing.T) {
|
func TestPMSPolicyChecker(t *testing.T) {
|
||||||
var defaultConfigAdmiral = map[string]interface{}{
|
var defaultConfigAdmiral = map[string]interface{}{
|
||||||
common.ExtEndpoint: "https://" + endpoint,
|
common.ExtEndpoint: "https://" + endpoint,
|
||||||
common.WithNotary: true,
|
common.WithNotary: true,
|
||||||
common.CfgExpiration: 5,
|
common.CfgExpiration: 5,
|
||||||
common.TokenExpiration: 30,
|
common.TokenExpiration: 30,
|
||||||
common.DatabaseType: "mysql",
|
common.DatabaseType: "postgresql",
|
||||||
common.MySQLHost: "127.0.0.1",
|
common.PostGreSQLHOST: "127.0.0.1",
|
||||||
common.MySQLPort: 3306,
|
common.PostGreSQLPort: 5432,
|
||||||
common.MySQLUsername: "root",
|
common.PostGreSQLUsername: "postgres",
|
||||||
common.MySQLPassword: "root123",
|
common.PostGreSQLPassword: "root123",
|
||||||
common.MySQLDatabase: "registry",
|
common.PostGreSQLDatabase: "registry",
|
||||||
common.SQLiteFile: "/tmp/registry.db",
|
|
||||||
}
|
}
|
||||||
adminServer, err := utilstest.NewAdminserver(defaultConfigAdmiral)
|
adminServer, err := utilstest.NewAdminserver(defaultConfigAdmiral)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "harbor-ui",
|
"name": "harbor-ui",
|
||||||
"version": "0.7.5",
|
"version": "0.7.10",
|
||||||
"description": "Harbor shared UI components based on Clarity and Angular4",
|
"description": "Harbor shared UI components based on Clarity and Angular4",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"start": "ng serve --host 0.0.0.0 --port 4500 --proxy-config proxy.config.json",
|
"start": "ng serve --host 0.0.0.0 --port 4500 --proxy-config proxy.config.json",
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "harbor-ui",
|
"name": "harbor-ui",
|
||||||
"version": "0.7.5",
|
"version": "0.7.10",
|
||||||
"description": "Harbor shared UI components based on Clarity and Angular4",
|
"description": "Harbor shared UI components based on Clarity and Angular4",
|
||||||
"author": "VMware",
|
"author": "VMware",
|
||||||
"module": "index.js",
|
"module": "index.js",
|
||||||
|
@ -69,9 +69,9 @@ export const CREATE_EDIT_RULE_TEMPLATE: string = `
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<label *ngIf="noEndpointInfo.length != 0" class="colorRed alertLabel">{{noEndpointInfo | translate}}</label>
|
<label *ngIf="noEndpointInfo.length != 0" class="colorRed alertLabel">{{noEndpointInfo | translate}}</label>
|
||||||
<span class="alertLabel goLink" *ngIf="noEndpointInfo.length != 0" (click)="goRegistry()">{{'SIDE_NAV.SYSTEM_MGMT.REGISTRY' | translate}}</span>
|
<span class="alertLabel goLink" *ngIf="noEndpointInfo.length != 0" (click)="goRegistry()">{{'REPLICATION.ENDPOINTS' | translate}}</span>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!--Trigger-->
|
<!--Trigger-->
|
||||||
<div class="form-group form-group-override">
|
<div class="form-group form-group-override">
|
||||||
<label class="form-group-label-override">{{'REPLICATION.TRIGGER_MODE' | translate}}</label>
|
<label class="form-group-label-override">{{'REPLICATION.TRIGGER_MODE' | translate}}</label>
|
||||||
|
@ -94,7 +94,7 @@ export const DefaultServiceConfig: IServiceConfig = {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Define the configuration for harbor shareable module
|
* Define the configuration for harbor shareable module
|
||||||
*
|
*
|
||||||
* @export
|
* @export
|
||||||
* @interface HarborModuleConfig
|
* @interface HarborModuleConfig
|
||||||
*/
|
*/
|
||||||
@ -140,8 +140,6 @@ export interface HarborModuleConfig {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
|
||||||
*
|
|
||||||
* @export
|
* @export
|
||||||
* @param {AppConfigService} configService
|
* @param {AppConfigService} configService
|
||||||
* @returns
|
* @returns
|
||||||
|
@ -73,16 +73,16 @@ export const REPOSITORY_GRIDVIEW_TEMPLATE = `
|
|||||||
</div>
|
</div>
|
||||||
<div class="card-footer">
|
<div class="card-footer">
|
||||||
<clr-dropdown [clrCloseMenuOnItemClick]="false">
|
<clr-dropdown [clrCloseMenuOnItemClick]="false">
|
||||||
<button *ngIf="withAdmiral" type="button" class="btn btn-link" (click)="provisionItemEvent($event, item)">{{'REPOSITORY.DEPLOY' | translate}}</button>
|
<button *ngIf="withAdmiral" type="button" class="btn btn-link" (click)="provisionItemEvent($event, item)" [disabled]="!hasProjectAdminRole">{{'REPOSITORY.DEPLOY' | translate}}</button>
|
||||||
<button type="button" class="btn btn-link" (click)="$event.stopPropagation()" clrDropdownTrigger>
|
<button type="button" class="btn btn-link" (click)="$event.stopPropagation()" [disabled]="!hasProjectAdminRole" clrDropdownTrigger>
|
||||||
{{'REPOSITORY.ACTION' | translate}}
|
{{'REPOSITORY.ACTION' | translate}}
|
||||||
<clr-icon shape="caret down"></clr-icon>
|
<clr-icon shape="caret down"></clr-icon>
|
||||||
</button>
|
</button>
|
||||||
<clr-dropdown-menu clrPosition="top-left" *clrIfOpen>
|
<clr-dropdown-menu clrPosition="top-left" *clrIfOpen>
|
||||||
<button *ngIf="withAdmiral" type="button" class="btn btn-link" clrDropdownItem (click)="itemAddInfoEvent($event, item)">
|
<button *ngIf="withAdmiral" type="button" class="btn btn-link" clrDropdownItem (click)="itemAddInfoEvent($event, item)" [disabled]="!hasProjectAdminRole">
|
||||||
{{'REPOSITORY.ADDITIONAL_INFO' | translate}}
|
{{'REPOSITORY.ADDITIONAL_INFO' | translate}}
|
||||||
</button>
|
</button>
|
||||||
<button type="button" class="btn btn-link" clrDropdownItem (click)="deleteItemEvent($event, item)">
|
<button type="button" class="btn btn-link" clrDropdownItem (click)="deleteItemEvent($event, item)" [disabled]="!hasProjectAdminRole">
|
||||||
{{'REPOSITORY.DELETE' | translate}}
|
{{'REPOSITORY.DELETE' | translate}}
|
||||||
</button>
|
</button>
|
||||||
</clr-dropdown-menu>
|
</clr-dropdown-menu>
|
||||||
|
@ -9,7 +9,7 @@ import { REPOSITORY_GRIDVIEW_TEMPLATE } from './repository-gridview.component.ht
|
|||||||
import { REPOSITORY_GRIDVIEW_STYLE } from './repository-gridview.component.css';
|
import { REPOSITORY_GRIDVIEW_STYLE } from './repository-gridview.component.css';
|
||||||
import { Repository, SystemInfo, SystemInfoService, RepositoryService, RequestQueryParams, RepositoryItem, TagService } from '../service/index';
|
import { Repository, SystemInfo, SystemInfoService, RepositoryService, RequestQueryParams, RepositoryItem, TagService } from '../service/index';
|
||||||
import { ErrorHandler } from '../error-handler/error-handler';
|
import { ErrorHandler } from '../error-handler/error-handler';
|
||||||
import { toPromise, CustomComparator , DEFAULT_PAGE_SIZE, calculatePage, doFiltering, doSorting} from '../utils';
|
import { toPromise, CustomComparator , DEFAULT_PAGE_SIZE, calculatePage, doFiltering, doSorting, clone} from '../utils';
|
||||||
import { ConfirmationState, ConfirmationTargets, ConfirmationButtons } from '../shared/shared.const';
|
import { ConfirmationState, ConfirmationTargets, ConfirmationButtons } from '../shared/shared.const';
|
||||||
import { ConfirmationDialogComponent } from '../confirmation-dialog/confirmation-dialog.component';
|
import { ConfirmationDialogComponent } from '../confirmation-dialog/confirmation-dialog.component';
|
||||||
import { ConfirmationMessage } from '../confirmation-dialog/confirmation-message';
|
import { ConfirmationMessage } from '../confirmation-dialog/confirmation-message';
|
||||||
@ -266,12 +266,16 @@ export class RepositoryGridviewComponent implements OnChanges, OnInit {
|
|||||||
|
|
||||||
provisionItemEvent(evt: any, repo: RepositoryItem): void {
|
provisionItemEvent(evt: any, repo: RepositoryItem): void {
|
||||||
evt.stopPropagation();
|
evt.stopPropagation();
|
||||||
this.repoProvisionEvent.emit(repo);
|
let repoCopy = clone(repo)
|
||||||
|
repoCopy.name = this.registryUrl + ':443/' + repoCopy.name;
|
||||||
|
this.repoProvisionEvent.emit(repoCopy);
|
||||||
}
|
}
|
||||||
|
|
||||||
itemAddInfoEvent(evt: any, repo: RepositoryItem): void {
|
itemAddInfoEvent(evt: any, repo: RepositoryItem): void {
|
||||||
evt.stopPropagation();
|
evt.stopPropagation();
|
||||||
this.addInfoEvent.emit(repo);
|
let repoCopy = clone(repo)
|
||||||
|
repoCopy.name = this.registryUrl + ':443/' + repoCopy.name;
|
||||||
|
this.addInfoEvent.emit(repoCopy);
|
||||||
}
|
}
|
||||||
|
|
||||||
deleteItemEvent(evt: any, item: RepositoryItem): void {
|
deleteItemEvent(evt: any, item: RepositoryItem): void {
|
||||||
|
@ -88,6 +88,13 @@ export interface IServiceConfig {
|
|||||||
*/
|
*/
|
||||||
projectPolicyEndpoint?: string;
|
projectPolicyEndpoint?: string;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The base endpoint of service used to handle projects
|
||||||
|
* @type {string}
|
||||||
|
* @memberOf IServiceConfig
|
||||||
|
*/
|
||||||
|
projectBaseEndpoint?: string;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* To determine whether or not to enable i18n multiple-language support.
|
* To determine whether or not to enable i18n multiple-language support.
|
||||||
*
|
*
|
||||||
|
@@ -1,9 +1,9 @@
 import { Observable } from 'rxjs/Observable';
 import { Injectable, Inject } from '@angular/core';
-import 'rxjs/add/observable/of';
 import { Http, Headers, RequestOptions } from '@angular/http';
-import { SERVICE_CONFIG, IServiceConfig } from '../service.config';
+import 'rxjs/add/observable/of';

+import { SERVICE_CONFIG, IServiceConfig } from '../service.config';
 import { Project } from '../project-policy-config/project';
 import { ProjectPolicy } from '../project-policy-config/project-policy-config.component';
 import {HTTP_JSON_OPTIONS, HTTP_GET_OPTIONS, buildHttpRequestOptions} from "../utils";
@@ -40,6 +40,18 @@ export abstract class ProjectService {
    */
   abstract updateProjectPolicy(projectId: number | string, projectPolicy: ProjectPolicy): Observable<any> | Promise<any> | any;

+  /**
+   * Get all projects
+   *
+   * @abstract
+   * @param {string} name
+   * @param {number} isPublic
+   * @param {number} page
+   * @param {number} pageSize
+   * @returns {(Observable<any> | Promise<any> | any)}
+   *
+   * @memberOf EndpointService
+   */
   abstract listProjects(name: string, isPublic: number, page?: number, pageSize?: number): Observable<Project[]> | Promise<Project[]> | Project[];
 }

@@ -64,14 +76,15 @@ export class ProjectDefaultService extends ProjectService {
     if (!projectId) {
       return Promise.reject('Bad argument');
     }
+    let baseUrl: string = this.config.projectBaseEndpoint ? this.config.projectBaseEndpoint : '/api/projects';
     return this.http
-      .get(`/api/projects/${projectId}`, HTTP_GET_OPTIONS)
+      .get(`${baseUrl}/${projectId}`, HTTP_GET_OPTIONS)
       .map(response => response.json())
       .catch(error => Observable.throw(error));
   }

-  listProjects(name: string, isPublic: number, page?: number, pageSize?: number): Observable<Project[]> | Promise<Project[]> | Project[] {
+  public listProjects(name: string, isPublic: number, page?: number, pageSize?: number): Observable<Project[]> | Promise<Project[]> | Project[] {
+    let baseUrl: string = this.config.projectBaseEndpoint ? this.config.projectBaseEndpoint : '/api/projects';
     let params = new RequestQueryParams();
     if (page && pageSize) {
       params.set('page', page + '');
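
Both methods above resolve their endpoint the same way: take `projectBaseEndpoint` from the injected config when it is set, otherwise fall back to the default `/api/projects`. The same ternary in isolation, wrapped in a hypothetical helper for clarity:

    // Hypothetical stand-alone version of the fallback used in the service methods above.
    interface IServiceConfig { projectBaseEndpoint?: string; }

    function projectBaseUrl(config: IServiceConfig): string {
      return config.projectBaseEndpoint ? config.projectBaseEndpoint : '/api/projects';
    }

    console.log(projectBaseUrl({}));                                        // "/api/projects"
    console.log(projectBaseUrl({ projectBaseEndpoint: '/hub/projects' }));  // "/hub/projects" (example override)
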
@@ -87,20 +100,24 @@ export class ProjectDefaultService extends ProjectService {

     // let options = new RequestOptions({ headers: this.getHeaders, search: params });
     return this.http
-      .get(`/api/projects`, buildHttpRequestOptions(params))
+      .get(baseUrl, buildHttpRequestOptions(params))
       .map(response => response.json())
       .catch(error => Observable.throw(error));
   }

   public updateProjectPolicy(projectId: number | string, projectPolicy: ProjectPolicy): any {
+    let baseUrl: string = this.config.projectBaseEndpoint ? this.config.projectBaseEndpoint : '/api/projects';
     return this.http
-      .put(`/api/projects/${projectId}`, { 'metadata': {
-        'public': projectPolicy.Public ? 'true' : 'false',
-        'enable_content_trust': projectPolicy.ContentTrust ? 'true' : 'false',
-        'prevent_vul': projectPolicy.PreventVulImg ? 'true' : 'false',
-        'severity': projectPolicy.PreventVulImgSeverity,
-        'auto_scan': projectPolicy.ScanImgOnPush ? 'true' : 'false'
-      } }, HTTP_JSON_OPTIONS)
+      .put(`${baseUrl}/${projectId}`, {
+        'metadata': {
+          'public': projectPolicy.Public ? 'true' : 'false',
+          'enable_content_trust': projectPolicy.ContentTrust ? 'true' : 'false',
+          'prevent_vul': projectPolicy.PreventVulImg ? 'true' : 'false',
+          'severity': projectPolicy.PreventVulImgSeverity,
+          'auto_scan': projectPolicy.ScanImgOnPush ? 'true' : 'false'
+        }
+      },
+      HTTP_JSON_OPTIONS)
       .map(response => response.status)
       .catch(error => Observable.throw(error));
   }
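
The reindented request body above makes the wire format easier to read: every policy flag is nested under `metadata` and serialized as the string 'true' or 'false', with the severity passed through as-is. A sketch of the JSON this produces, using assumed `ProjectPolicy` values:

    // Assumed policy values; only the shape of the payload is taken from the diff above.
    const projectPolicy = {
      Public: true,
      ContentTrust: false,
      PreventVulImg: true,
      PreventVulImgSeverity: 'high',   // example severity string
      ScanImgOnPush: false
    };

    const body = {
      'metadata': {
        'public': projectPolicy.Public ? 'true' : 'false',
        'enable_content_trust': projectPolicy.ContentTrust ? 'true' : 'false',
        'prevent_vul': projectPolicy.PreventVulImg ? 'true' : 'false',
        'severity': projectPolicy.PreventVulImgSeverity,
        'auto_scan': projectPolicy.ScanImgOnPush ? 'true' : 'false'
      }
    };

    console.log(JSON.stringify(body));
    // {"metadata":{"public":"true","enable_content_trust":"false","prevent_vul":"true","severity":"high","auto_scan":"false"}}
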
@@ -147,7 +147,7 @@ export class TagDefaultService extends TagService {
       return Promise.reject('Invalid parameters.');
     }

-    let _addLabelToImageUrl = `/api/repositories/${repoName}/tags/${tagName}/labels`;
+    let _addLabelToImageUrl = `${this._baseUrl}/${repoName}/tags/${tagName}/labels`;
     return this.http.post(_addLabelToImageUrl, {id: labelId}, HTTP_JSON_OPTIONS).toPromise()
       .then(response => response.status)
       .catch(error => Promise.reject(error));
@@ -159,7 +159,7 @@ export class TagDefaultService extends TagService {
       return Promise.reject('Invalid parameters.');
     }

-    let _addLabelToImageUrl = `/api/repositories/${repoName}/tags/${tagName}/labels/${labelId}`;
+    let _addLabelToImageUrl = `${this._baseUrl}/${repoName}/tags/${tagName}/labels/${labelId}`;
     return this.http.delete(_addLabelToImageUrl).toPromise()
       .then(response => response.status)
       .catch(error => Promise.reject(error));
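
Both label endpoints above now start from the service's `_baseUrl` rather than a hard-coded `/api/repositories` prefix, so an overridden repository endpoint is honored consistently. A sketch of the resulting URLs, with assumed values and `_baseUrl` taken to default to `/api/repositories`:

    // Illustration only – the concrete values are assumptions.
    const _baseUrl = '/api/repositories';
    const repoName = 'library/nginx';
    const tagName = '1.15';
    const labelId = 7;

    console.log(`${_baseUrl}/${repoName}/tags/${tagName}/labels`);
    // "/api/repositories/library/nginx/tags/1.15/labels"
    console.log(`${_baseUrl}/${repoName}/tags/${tagName}/labels/${labelId}`);
    // "/api/repositories/library/nginx/tags/1.15/labels/7"
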
@@ -285,6 +285,6 @@ export function isEmptyObject(obj: any): boolean {
  * @returns {*}
  */
 export function clone(srcObj: any): any {
-  if (!srcObj) return null;
+  if (!srcObj) { return null };
   return JSON.parse(JSON.stringify(srcObj));
 }
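
The `clone` helper above is a deep copy via a JSON round-trip, which is what lets the grid hand out a renamed copy without touching its own row. It only preserves plain JSON data: functions and `undefined` fields are dropped, and `Date` objects come back as strings. A small behaviour sketch with illustrative values:

    // Same helper as above, exercised on assumed sample data.
    function clone(srcObj: any): any {
      if (!srcObj) { return null; }
      return JSON.parse(JSON.stringify(srcObj));
    }

    const original = { name: 'library/nginx', tags: ['latest'], pulled: new Date(0) };
    const copied = clone(original);

    copied.tags.push('1.15');
    console.log(original.tags);         // ["latest"] – nested values are copied, not shared
    console.log(typeof copied.pulled);  // "string"  – Date does not survive the round-trip
    console.log(clone(undefined));      // null      – falsy inputs short-circuit
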
@@ -30,7 +30,7 @@
     "clarity-icons": "^0.10.27",
     "clarity-ui": "^0.10.27",
     "core-js": "^2.4.1",
-    "harbor-ui": "0.7.9",
+    "harbor-ui": "0.7.10",
    "intl": "^1.2.5",
    "mutationobserver-shim": "^0.3.2",
    "ngx-cookie": "^1.0.0",
@@ -95,7 +95,7 @@ export class HarborShellComponent implements OnInit, OnDestroy {

   public get isSystemAdmin(): boolean {
     let account = this.session.getCurrentUser();
-    return account != null && account.has_admin_role > 0;
+    return account != null && account.has_admin_role;
   }

   public get isUserExisting(): boolean {
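
This is the first of several identical adjustments in this commit: `has_admin_role` on the session user used to be a number (0 or 1) and the getters compared it with `> 0`; now that the field is a boolean, they simply test it directly. A sketch of the shape change, with the interfaces assumed for illustration:

    // Assumed before/after shapes of the session user; only the field name comes from the diff.
    interface SessionUserOld { has_admin_role: number; }   // 0 or 1
    interface SessionUserNew { has_admin_role: boolean; }

    const before: SessionUserOld = { has_admin_role: 1 };
    const after: SessionUserNew = { has_admin_role: true };

    console.log(before.has_admin_role > 0);  // old check
    console.log(after.has_admin_role);       // new check – both print true
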
@@ -103,7 +103,7 @@ export class NavigatorComponent implements OnInit {

   public get canDownloadCert(): boolean {
     return this.session.getCurrentUser() &&
-      this.session.getCurrentUser().has_admin_role > 0 &&
+      this.session.getCurrentUser().has_admin_role &&
       this.appConfigService.getConfig() &&
       this.appConfigService.getConfig().has_ca_root;
   }
@@ -70,7 +70,7 @@ export class ConfigurationComponent implements OnInit, OnDestroy {

   public get hasAdminRole(): boolean {
     return this.session.getCurrentUser() &&
-      this.session.getCurrentUser().has_admin_role > 0;
+      this.session.getCurrentUser().has_admin_role;
   }

   public get hasCAFile(): boolean {
@@ -142,7 +142,7 @@ export class ConfigurationComponent implements OnInit, OnDestroy {
       // First load
       // Double confirm the current use has admin role
       let currentUser = this.session.getCurrentUser();
-      if (currentUser && currentUser.has_admin_role > 0) {
+      if (currentUser && currentUser.has_admin_role) {
         this.retrieveConfig();
       }

@@ -97,7 +97,7 @@ export class ListProjectComponent implements OnDestroy {
     if (account) {
       switch (this.appConfigService.getConfig().project_creation_restriction) {
         case "adminonly":
-          return (account.has_admin_role === 1);
+          return (account.has_admin_role);
         case "everyone":
           return true;
       }
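
The switch above gates project creation on the `project_creation_restriction` setting, and with the boolean `has_admin_role` the admin-only branch no longer needs an `=== 1` comparison. The rule condensed into a hypothetical stand-alone function:

    // Hypothetical extraction of the permission rule shown in the hunk above.
    function canCreateProject(restriction: string, hasAdminRole: boolean): boolean {
      switch (restriction) {
        case 'adminonly':
          return hasAdminRole;
        case 'everyone':
          return true;
        default:
          return false;
      }
    }

    console.log(canCreateProject('adminonly', false));  // false
    console.log(canCreateProject('everyone', false));   // true
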
@@ -107,7 +107,7 @@ export class ListProjectComponent implements OnDestroy {

   public get isSystemAdmin(): boolean {
     let account = this.session.getCurrentUser();
-    return account != null && account.has_admin_role > 0;
+    return account != null && account.has_admin_role;
   }

   public get canDelete(): boolean {
@@ -19,10 +19,10 @@
     "password": "",
     "realname": "",
     "comment": "",
-    "deleted": 0,
+    "deleted": false,
     "role_name": "projectAdmin",
     "role_id": 1,
-    "has_admin_role": 0,
+    "has_admin_role": false,
     "reset_uuid": "",
     "creation_time": "0001-01-01T00:00:00Z",
     "update_time": "0001-01-01T00:00:00Z"
@@ -52,7 +52,7 @@ export class ProjectDetailComponent {

   public get isSystemAdmin(): boolean {
     let account = this.sessionService.getCurrentUser();
-    return account && account.has_admin_role > 0;
+    return account && account.has_admin_role;
   }

   public get isSProjectAdmin(): boolean {
Some files were not shown because too many files have changed in this diff.