Mirror of https://github.com/goharbor/harbor.git (synced 2025-01-19 22:21:24 +01:00)
Merge pull request #4489 from reasonerjt/scan-job-migrate
UI trigger Image scan job by calling new job service API.
Commit 40a5269ace
@@ -1,6 +1,6 @@
 PORT=8080
 LOG_LEVEL=debug
-EXT_ENDPOINT=$ui_url
+EXT_ENDPOINT=$public_url
 AUTH_MODE=$auth_mode
 SELF_REGISTRATION=$self_registration
 LDAP_URL=$ldap_url

@@ -22,8 +22,8 @@ MYSQL_PORT=$db_port
 MYSQL_USR=$db_user
 MYSQL_PWD=$db_password
 MYSQL_DATABASE=registry
-REGISTRY_URL=http://registry:5000
+REGISTRY_URL=$registry_url
-TOKEN_SERVICE_URL=http://ui/service/token
+TOKEN_SERVICE_URL=$token_service_url
 EMAIL_HOST=$email_host
 EMAIL_PORT=$email_port
 EMAIL_USR=$email_usr

@@ -53,7 +53,9 @@ UAA_ENDPOINT=$uaa_endpoint
 UAA_CLIENTID=$uaa_clientid
 UAA_CLIENTSECRET=$uaa_clientsecret
 UAA_VERIFY_CERT=$uaa_verify_cert
-UI_URL=http://ui:8080
+UI_URL=$ui_url
-JOBSERVICE_URL=http://jobservice:8080
+JOBSERVICE_URL=$jobservice_url
+CLAIR_URL=$clair_url
+NOTARY_URL=$notary_url
 REGISTRY_STORAGE_PROVIDER_NAME=$storage_provider_name
 READ_ONLY=false
@@ -22,4 +22,9 @@ worker_pool:
     port: 6379
     namespace: "namespace"
 #Logger for job
+logger:
+  path: "/var/log/jobs"
+  level: "INFO"
+  archive_period: 14 #days
+#Admin server endpoint
+admin_server: "http://adminserver:8080/"
@@ -1,4 +1,4 @@
 UI_SECRET=$ui_secret
 JOBSERVICE_SECRET=$jobservice_secret
-ADMINSERVER_URL=http://adminserver:8080
+ADMINSERVER_URL=$adminserver_url
 GODEBUG=netdns=cgo
@@ -20,14 +20,14 @@ http:
 auth:
   token:
     issuer: harbor-token-issuer
-    realm: $ui_url/service/token
+    realm: $public_url/service/token
     rootcertbundle: /etc/registry/root.crt
     service: harbor-registry
 notifications:
   endpoints:
   - name: harbor
     disabled: false
-    url: http://ui:8080/service/notifications
+    url: $ui_url/service/notifications
     timeout: 3000ms
     threshold: 5
     backoff: 1s
@@ -31,7 +31,7 @@ http:
 auth:
   token:
     issuer: harbor-token-issuer
-    realm: $ui_url/service/token
+    realm: $public_url/service/token
     rootcertbundle: /etc/registry/root.crt
     service: harbor-registry

@@ -39,7 +39,7 @@ notifications:
   endpoints:
   - name: harbor
     disabled: false
-    url: http://ui:8080/service/notifications
+    url: $ui_url/service/notifications
     timeout: 3000ms
     threshold: 5
     backoff: 1s
@@ -3,6 +3,6 @@ CONFIG_PATH=/etc/ui/app.conf
 UI_SECRET=$ui_secret
 JOBSERVICE_SECRET=$jobservice_secret
 GODEBUG=netdns=cgo
-ADMINSERVER_URL=http://adminserver:8080
+ADMINSERVER_URL=$adminserver_url
 UAA_CA_ROOT=/etc/ui/certificates/uaa_ca.pem
 _REDIS_URL=$redis_url

@@ -171,3 +171,4 @@ registry_storage_provider_name = filesystem
 #registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2".
 #Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration.
 registry_storage_provider_config =
@@ -119,7 +119,7 @@ create table user_group
 id int NOT NULL AUTO_INCREMENT,
 group_name varchar(255) NOT NULL,
 group_type int default 0,
-group_property varchar(512) NOT NULL,
+ldap_group_dn varchar(512) NOT NULL,
 creation_time timestamp default CURRENT_TIMESTAMP,
 update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
 PRIMARY KEY (id)

@@ -193,6 +193,8 @@ create table replication_job (
 repository varchar(256) NOT NULL,
 operation varchar(64) NOT NULL,
 tags varchar(16384),
+#New job service only records uuid, for compatibility in this table both IDs are stored.
+job_uuid varchar(64),
 creation_time timestamp default CURRENT_TIMESTAMP,
 update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
 PRIMARY KEY (id),

@@ -217,6 +219,8 @@ create table img_scan_job (
 repository varchar(256) NOT NULL,
 tag varchar(128) NOT NULL,
 digest varchar(128),
+#New job service only records uuid, for compatibility in this table both IDs are stored.
+job_uuid varchar(64),
 creation_time timestamp default CURRENT_TIMESTAMP,
 update_time timestamp default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
 PRIMARY KEY (id)
@@ -64,7 +64,7 @@ create table user_group (
 id INTEGER PRIMARY KEY,
 group_name varchar(255) NOT NULL,
 group_type int default 0,
-group_property varchar(512) NOT NULL,
+ldap_group_dn varchar(512) NOT NULL,
 creation_time timestamp default CURRENT_TIMESTAMP,
 update_time timestamp default CURRENT_TIMESTAMP
 );

@@ -183,6 +183,7 @@ create table replication_job (
 repository varchar(256) NOT NULL,
 operation varchar(64) NOT NULL,
 tags varchar(16384),
+job_uuid varchar(64),
 creation_time timestamp default CURRENT_TIMESTAMP,
 update_time timestamp default CURRENT_TIMESTAMP
 );

@@ -204,6 +205,7 @@ create table img_scan_job (
 repository varchar(256) NOT NULL,
 tag varchar(128) NOT NULL,
 digest varchar(64),
+job_uuid varchar(64),
 creation_time timestamp default CURRENT_TIMESTAMP,
 update_time timestamp default CURRENT_TIMESTAMP
 );
make/prepare
@@ -195,7 +195,7 @@ reload_config = rcp.get("configuration", "reload_config") if rcp.has_option(
     "configuration", "reload_config") else "false"
 hostname = rcp.get("configuration", "hostname")
 protocol = rcp.get("configuration", "ui_url_protocol")
-ui_url = protocol + "://" + hostname
+public_url = protocol + "://" + hostname
 email_identity = rcp.get("configuration", "email_identity")
 email_host = rcp.get("configuration", "email_server")
 email_port = rcp.get("configuration", "email_server_port")

@@ -270,7 +270,6 @@ storage_provider_name = rcp.get("configuration", "registry_storage_provider_name
 storage_provider_config = rcp.get("configuration", "registry_storage_provider_config").strip()
 # yaml requires 1 or more spaces between the key and value
 storage_provider_config = storage_provider_config.replace(":", ": ", 1)
-
 ui_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
 jobservice_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))

@@ -298,6 +297,14 @@ job_conf_env = os.path.join(config_dir, "jobservice", "env")
 nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
 cert_dir = os.path.join(config_dir, "nginx", "cert")
 log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
+adminserver_url = "http://adminserver:8080"
+registry_url = "http://registry:5000"
+ui_url = "http://ui:8080"
+token_service_url = "http://ui:8080/service/token"
+
+jobservice_url = "http://jobservice:8080"
+clair_url = "http://clair:6060"
+notary_url = "http://notary-server:4443"

 if protocol == "https":
     target_cert_path = os.path.join(cert_dir, os.path.basename(cert_path))

@@ -317,6 +324,7 @@ else:
 render(os.path.join(templates_dir, "adminserver", "env"),
        adminserver_conf_env,
        reload_config=reload_config,
+       public_url=public_url,
        ui_url=ui_url,
        auth_mode=auth_mode,
        self_registration=self_registration,
@@ -363,14 +371,20 @@ render(os.path.join(templates_dir, "adminserver", "env"),
        uaa_clientid=uaa_clientid,
        uaa_clientsecret=uaa_clientsecret,
        uaa_verify_cert=uaa_verify_cert,
-       storage_provider_name=storage_provider_name
+       storage_provider_name=storage_provider_name,
+       registry_url=registry_url,
+       token_service_url=token_service_url,
+       jobservice_url=jobservice_url,
+       clair_url=clair_url,
+       notary_url=notary_url
        )

 render(os.path.join(templates_dir, "ui", "env"),
        ui_conf_env,
        ui_secret=ui_secret,
        jobservice_secret=jobservice_secret,
-       redis_url = redis_url
+       redis_url = redis_url,
+       adminserver_url = adminserver_url
        )

 registry_config_file = "config_ha.yml" if args.ha_mode else "config.yml"

@@ -385,6 +399,7 @@ storage_provider_info = ('\n' + ' ' * 4).join(
 render(os.path.join(templates_dir, "registry", registry_config_file),
        registry_conf,
        storage_provider_info=storage_provider_info,
+       public_url=public_url,
        ui_url=ui_url,
        redis_url=redis_url)

@@ -395,7 +410,8 @@ render(os.path.join(templates_dir, "db", "env"),
 render(os.path.join(templates_dir, "jobservice", "env"),
        job_conf_env,
        ui_secret=ui_secret,
-       jobservice_secret=jobservice_secret)
+       jobservice_secret=jobservice_secret,
+       adminserver_url=adminserver_url)

 render(os.path.join(templates_dir, "log", "logrotate.conf"),
        log_rotate_config,

@@ -522,7 +538,7 @@ if args.notary_mode:
     shutil.copy2(os.path.join(notary_temp_dir, "signer-config.json"), notary_config_dir)
     render(os.path.join(notary_temp_dir, "server-config.json"),
            os.path.join(notary_config_dir, "server-config.json"),
-           token_endpoint=ui_url)
+           token_endpoint=public_url)

     print("Copying nginx configuration file for notary")
     shutil.copy2(os.path.join(templates_dir, "nginx", "notary.upstream.conf"), nginx_conf_d)
@@ -151,6 +151,9 @@ var (
         },
         common.UIURL: "UI_URL",
         common.JobServiceURL: "JOBSERVICE_URL",
+        common.TokenServiceURL: "TOKEN_SERVICE_URL",
+        common.ClairURL: "CLAIR_URL",
+        common.NotaryURL: "NOTARY_URL",
         common.RegistryStorageProviderName: "REGISTRY_STORAGE_PROVIDER_NAME",
         common.ReadOnly: &parser{
             env: "READ_ONLY",

@@ -202,6 +205,12 @@ var (
             parse: parseStringToBool,
         },
         common.RegistryStorageProviderName: "REGISTRY_STORAGE_PROVIDER_NAME",
+        common.UIURL: "UI_URL",
+        common.JobServiceURL: "JOBSERVICE_URL",
+        common.RegistryURL: "REGISTRY_URL",
+        common.TokenServiceURL: "TOKEN_SERVICE_URL",
+        common.ClairURL: "CLAIR_URL",
+        common.NotaryURL: "NOTARY_URL",
     }
 )
@@ -100,4 +100,10 @@ const (
     UserMember = "u"
     GroupMember = "g"
     ReadOnly = "read_only"
+    ClairURL = "clair_url"
+    NotaryURL = "notary_url"
+    DefaultAdminserverEndpoint = "http://adminserver:8080"
+    DefaultJobserviceEndpoint = "http://jobservice:8080"
+    DefaultUIEndpoint = "http://ui:8080"
+    DefaultNotaryEndpoint = "http://notary-server:4443"
 )
@@ -1086,6 +1086,16 @@ func TestAddRepJob(t *testing.T) {
     }
 }

+func TestSetRepJobUUID(t *testing.T) {
+    uuid := "u-rep-job-uuid"
+    assert := assert.New(t)
+    err := SetRepJobUUID(jobID, uuid)
+    assert.Nil(err)
+    j, err := GetRepJob(jobID)
+    assert.Nil(err)
+    assert.Equal(uuid, j.UUID)
+}
+
 func TestUpdateRepJobStatus(t *testing.T) {
     err := UpdateRepJobStatus(jobID, models.JobFinished)
     if err != nil {

@@ -1505,6 +1515,21 @@ func TestGetScanJobs(t *testing.T) {
     assert.Nil(err)
 }

+func TestSetScanJobUUID(t *testing.T) {
+    uuid := "u-scan-job-uuid"
+    assert := assert.New(t)
+    id, err := AddScanJob(sj1)
+    assert.Nil(err)
+    err = SetScanJobUUID(id, uuid)
+    assert.Nil(err)
+    j, err := GetScanJob(id)
+    assert.Nil(err)
+    assert.Equal(uuid, j.UUID)
+    err = ClearTable(models.ScanJobTable)
+    assert.Nil(err)
+
+}
+
 func TestUpdateScanJobStatus(t *testing.T) {
     assert := assert.New(t)
     id, err := AddScanJob(sj1)
src/common/dao/group/usergroup.go (new file)
@@ -0,0 +1,94 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package group

import (
    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/log"
)

// AddUserGroup - Add User Group
func AddUserGroup(userGroup models.UserGroup) (int, error) {
    o := dao.GetOrmer()
    id, err := o.Insert(&userGroup)
    if err != nil {
        return 0, err
    }
    return int(id), err
}

// QueryUserGroup - Query User Group
func QueryUserGroup(query models.UserGroup) ([]*models.UserGroup, error) {
    o := dao.GetOrmer()
    sql := `select id, group_name, group_type, ldap_group_dn from user_group where 1=1 `
    sqlParam := make([]interface{}, 1)
    groups := []*models.UserGroup{}
    if len(query.GroupName) != 0 {
        sql += ` and group_name like ? `
        sqlParam = append(sqlParam, `%`+dao.Escape(query.GroupName)+`%`)
    }

    if query.GroupType != 0 {
        sql += ` and group_type = ? `
        sqlParam = append(sqlParam, query.GroupType)
    }

    if len(query.LdapGroupDN) != 0 {
        sql += ` and ldap_group_dn = ? `
        sqlParam = append(sqlParam, query.LdapGroupDN)
    }
    _, err := o.Raw(sql, sqlParam).QueryRows(&groups)
    if err != nil {
        return nil, err
    }
    return groups, nil
}

// GetUserGroup ...
func GetUserGroup(id int) (*models.UserGroup, error) {
    userGroup := models.UserGroup{ID: id}
    o := dao.GetOrmer()
    err := o.Read(&userGroup)
    if err != nil {
        return nil, err
    }
    return &userGroup, nil
}

// DeleteUserGroup ...
func DeleteUserGroup(id int) error {
    userGroup := models.UserGroup{ID: id}
    o := dao.GetOrmer()
    _, err := o.Delete(&userGroup)
    if err == nil {
        //Delete all related project members
        sql := `delete from project_member where entity_id = ? and entity_type='g'`
        _, err := o.Raw(sql, id).Exec()
        if err != nil {
            return err
        }
    }
    return err
}

// UpdateUserGroupName ...
func UpdateUserGroupName(id int, groupName string) error {
    log.Debugf("Updating user_group with id:%v, name:%v", id, groupName)
    o := dao.GetOrmer()
    sql := "update user_group set group_name = ? where id = ? "
    _, err := o.Raw(sql, groupName, id).Exec()
    return err
}
src/common/dao/group/usergroup_test.go (new file)
@@ -0,0 +1,218 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package group

import (
    "fmt"
    "os"
    "testing"

    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/log"
)

var createdUserGroupID int

func TestMain(m *testing.M) {

    //databases := []string{"mysql", "sqlite"}
    databases := []string{"mysql"}
    for _, database := range databases {
        log.Infof("run test cases for database: %s", database)

        result := 1
        switch database {
        case "mysql":
            dao.PrepareTestForMySQL()
        case "sqlite":
            dao.PrepareTestForSQLite()
        default:
            log.Fatalf("invalid database: %s", database)
        }

        //Extract to test utils
        initSqls := []string{
            "insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
            "insert into project (name, owner_id) values ('member_test_01', 1)",
            "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
            "update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
            "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
            "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
        }

        clearSqls := []string{
            "delete from project where name='member_test_01'",
            "delete from user where username='member_test_01' or username='pm_sample'",
            "delete from user_group",
            "delete from project_member",
        }
        dao.PrepareTestData(clearSqls, initSqls)

        result = m.Run()

        if result != 0 {
            os.Exit(result)
        }
    }

}

func TestAddUserGroup(t *testing.T) {
    type args struct {
        userGroup models.UserGroup
    }
    tests := []struct {
        name    string
        args    args
        want    int
        wantErr bool
    }{
        {"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: 1, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false},
        {"Insert other user group", args{userGroup: models.UserGroup{GroupName: "other_group", GroupType: 3, LdapGroupDN: "other information"}}, 0, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := AddUserGroup(tt.args.userGroup)
            if (err != nil) != tt.wantErr {
                t.Errorf("AddUserGroup() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if got <= 0 {
                t.Errorf("Failed to add user group")
            }
        })
    }
}

func TestQueryUserGroup(t *testing.T) {
    type args struct {
        query models.UserGroup
    }
    tests := []struct {
        name    string
        args    args
        want    int
        wantErr bool
    }{
        {"Query all user group", args{query: models.UserGroup{GroupName: "test_group_01"}}, 1, false},
        {"Query all ldap group", args{query: models.UserGroup{GroupType: 1}}, 2, false},
        {"Query ldap group with group property", args{query: models.UserGroup{GroupType: 1, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := QueryUserGroup(tt.args.query)
            if (err != nil) != tt.wantErr {
                t.Errorf("QueryUserGroup() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if len(got) != tt.want {
                t.Errorf("QueryUserGroup() = %v, want %v", len(got), tt.want)
            }
        })
    }
}

func TestGetUserGroup(t *testing.T) {
    userGroup := models.UserGroup{GroupName: "insert_group", GroupType: 1, LdapGroupDN: "ldap_dn_string"}
    result, err := AddUserGroup(userGroup)
    if err != nil {
        t.Errorf("Error occurred when AddUserGroup: %v", err)
    }
    createdUserGroupID = result
    type args struct {
        id int
    }
    tests := []struct {
        name    string
        args    args
        want    string
        wantErr bool
    }{
        {"Get User Group", args{id: result}, "insert_group", false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := GetUserGroup(tt.args.id)
            if (err != nil) != tt.wantErr {
                t.Errorf("GetUserGroup() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if got.GroupName != tt.want {
                t.Errorf("GetUserGroup() = %v, want %v", got.GroupName, tt.want)
            }
        })
    }
}
func TestUpdateUserGroup(t *testing.T) {
    if createdUserGroupID == 0 {
        fmt.Println("User group doesn't created, skip to test!")
        return
    }
    type args struct {
        id        int
        groupName string
    }
    tests := []struct {
        name    string
        args    args
        wantErr bool
    }{
        {"Update user group", args{id: createdUserGroupID, groupName: "updated_groupname"}, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            fmt.Printf("id=%v", createdUserGroupID)
            if err := UpdateUserGroupName(tt.args.id, tt.args.groupName); (err != nil) != tt.wantErr {
                t.Errorf("UpdateUserGroup() error = %v, wantErr %v", err, tt.wantErr)
                userGroup, err := GetUserGroup(tt.args.id)
                if err != nil {
                    t.Errorf("Error occurred when GetUserGroup: %v", err)
                }
                if userGroup == nil {
                    t.Fatalf("Failed to get updated user group")
                }
                if userGroup.GroupName != tt.args.groupName {
                    t.Fatalf("Failed to update user group")
                }
            }
        })
    }
}

func TestDeleteUserGroup(t *testing.T) {
    if createdUserGroupID == 0 {
        fmt.Println("User group doesn't created, skip to test!")
        return
    }

    type args struct {
        id int
    }
    tests := []struct {
        name    string
        args    args
        wantErr bool
    }{
        {"Delete existing user group", args{id: createdUserGroupID}, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if err := DeleteUserGroup(tt.args.id); (err != nil) != tt.wantErr {
                t.Errorf("DeleteUserGroup() error = %v, wantErr %v", err, tt.wantErr)
            }
        })
    }
}
src/common/dao/project/projectmember.go (new file)
@@ -0,0 +1,118 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package project

import (
    "fmt"

    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/log"
)

// GetProjectMember gets all members of the project.
func GetProjectMember(queryMember models.Member) ([]*models.Member, error) {
    log.Debugf("Query condition %+v", queryMember)
    if queryMember.ProjectID == 0 {
        return nil, fmt.Errorf("Failed to query project member, query condition %v", queryMember)
    }

    o := dao.GetOrmer()
    sql := ` select a.* from ((select pm.id as id, pm.project_id as project_id, ug.id as entity_id, ug.group_name as entity_name, ug.creation_time, ug.update_time, r.name as rolename,
        r.role_id as role, pm.entity_type as entity_type from user_group ug join project_member pm
        on pm.project_id = ? and ug.id = pm.entity_id join role r on pm.role = r.role_id where pm.entity_type = 'g')
        union
        (select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename,
        r.role_id as role, pm.entity_type as entity_type from user u join project_member pm
        on pm.project_id = ? and u.user_id = pm.entity_id
        join role r on pm.role = r.role_id where u.deleted = 0 and pm.entity_type = 'u')) as a where a.project_id = ? `

    queryParam := make([]interface{}, 1)
    // used ProjectID already
    queryParam = append(queryParam, queryMember.ProjectID)
    queryParam = append(queryParam, queryMember.ProjectID)
    queryParam = append(queryParam, queryMember.ProjectID)

    if len(queryMember.Entityname) > 0 {
        sql += " and a.entity_name = ? "
        queryParam = append(queryParam, queryMember.Entityname)
    }

    if len(queryMember.EntityType) == 1 {
        sql += " and a.entity_type = ? "
        queryParam = append(queryParam, queryMember.EntityType)
    }

    if queryMember.EntityID > 0 {
        sql += " and a.entity_id = ? "
        queryParam = append(queryParam, queryMember.EntityID)
    }
    if queryMember.ID > 0 {
        sql += " and a.id = ? "
        queryParam = append(queryParam, queryMember.ID)
    }
    sql += ` order by a.entity_name `
    members := []*models.Member{}
    _, err := o.Raw(sql, queryParam).QueryRows(&members)

    return members, err
}

// AddProjectMember inserts a record to table project_member
func AddProjectMember(member models.Member) (int, error) {

    log.Debugf("Adding project member %+v", member)
    o := dao.GetOrmer()

    if member.EntityID <= 0 {
        return 0, fmt.Errorf("Invalid entity_id, member: %+v", member)
    }

    if member.ProjectID <= 0 {
        return 0, fmt.Errorf("Invalid project_id, member: %+v", member)
    }

    sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?)"
    r, err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).Exec()
    if err != nil {
        return 0, err
    }
    pmid, err := r.LastInsertId()
    if err != nil {
        return 0, err
    }
    return int(pmid), err
}

// UpdateProjectMemberRole updates the record in table project_member, only role can be changed
func UpdateProjectMemberRole(pmID int, role int) error {
    if role <= 0 || role >= 3 {
        return fmt.Errorf("Failed to update project member, role is not in 0,1,2, role:%v", role)
    }
    o := dao.GetOrmer()
    sql := "update project_member set role = ? where id = ? "
    _, err := o.Raw(sql, role, pmID).Exec()
    return err
}

// DeleteProjectMemberByID - Delete Project Member by ID
func DeleteProjectMemberByID(pmid int) error {
    o := dao.GetOrmer()
    sql := "delete from project_member where id = ?"
    if _, err := o.Raw(sql, pmid).Exec(); err != nil {
        return err
    }
    return nil
}
src/common/dao/project/projectmember_test.go (new file)
@@ -0,0 +1,253 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package project

import (
    "fmt"
    "os"
    "testing"

    "github.com/vmware/harbor/src/common"
    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/log"
    _ "github.com/vmware/harbor/src/ui/auth/db"
    _ "github.com/vmware/harbor/src/ui/auth/ldap"
    cfg "github.com/vmware/harbor/src/ui/config"
)

func TestMain(m *testing.M) {

    //databases := []string{"mysql", "sqlite"}
    databases := []string{"mysql"}
    for _, database := range databases {
        log.Infof("run test cases for database: %s", database)

        result := 1
        switch database {
        case "mysql":
            dao.PrepareTestForMySQL()
        case "sqlite":
            dao.PrepareTestForSQLite()
        default:
            log.Fatalf("invalid database: %s", database)
        }

        //Extract to test utils
        initSqls := []string{
            "insert into user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
            "insert into project (name, owner_id) values ('member_test_01', 1)",
            "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
            "update project set owner_id = (select user_id from user where username = 'member_test_01') where name = 'member_test_01'",
            "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from user where username = 'member_test_01'), 'u', 1)",
            "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
        }

        clearSqls := []string{
            "delete from project where name='member_test_01'",
            "delete from user where username='member_test_01' or username='pm_sample'",
            "delete from user_group",
            "delete from project_member",
        }
        dao.PrepareTestData(clearSqls, initSqls)
        cfg.Init()
        result = m.Run()

        if result != 0 {
            os.Exit(result)
        }
    }

}

func TestDeleteProjectMemberByID(t *testing.T) {
    currentProject, err := dao.GetProjectByName("member_test_01")

    if currentProject == nil || err != nil {
        fmt.Println("Failed to load project!")
    } else {
        fmt.Printf("Load project %+v", currentProject)
    }
    var addMember = models.Member{
        ProjectID:  currentProject.ProjectID,
        EntityID:   1,
        EntityType: common.UserMember,
        Role:       models.DEVELOPER,
    }

    pmid, err := AddProjectMember(addMember)

    if err != nil {
        t.Fatalf("Failed to add project member error: %v", err)
    }

    type args struct {
        pmid int
    }
    tests := []struct {
        name    string
        args    args
        wantErr bool
    }{
        {"Delete created", args{pmid}, false},
        {"Delete non exist", args{-13}, false},
        {"Delete non exist", args{13}, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if err := DeleteProjectMemberByID(tt.args.pmid); (err != nil) != tt.wantErr {
                t.Errorf("DeleteProjectMemberByID() error = %v, wantErr %v", err, tt.wantErr)
            }
        })
    }

}
func TestAddProjectMember(t *testing.T) {

    currentProject, err := dao.GetProjectByName("member_test_01")
    if err != nil {
        t.Errorf("Error occurred when GetProjectByName: %v", err)
    }
    member := models.Member{
        ProjectID:  currentProject.ProjectID,
        EntityID:   1,
        EntityType: common.UserMember,
        Role:       models.PROJECTADMIN,
    }

    log.Debugf("Current project id %v", currentProject.ProjectID)
    pmid, err := AddProjectMember(member)
    if err != nil {
        t.Errorf("Error occurred in AddProjectMember: %v", err)
    }
    if pmid == 0 {
        t.Errorf("Error add project member, pmid=0")
    }

    queryMember := models.Member{
        ProjectID: currentProject.ProjectID,
        ID:        pmid,
    }

    memberList, err := GetProjectMember(queryMember)
    if err != nil {
        t.Errorf("Failed to query project member, %v, error: %v", queryMember, err)
    }

    if len(memberList) == 0 {
        t.Errorf("Failed to query project member, %v", queryMember)
    }
}
func TestUpdateProjectMemberRole(t *testing.T) {
    currentProject, err := dao.GetProjectByName("member_test_01")
    user := models.User{
        Username: "pm_sample",
        Email:    "pm_sample@example.com",
        Realname: "pm_sample",
        Password: "1234567d",
    }
    o := dao.GetOrmer()
    userID, err := o.Insert(&user)
    if err != nil {
        t.Errorf("Error occurred when add user: %v", err)
    }
    member := models.Member{
        ProjectID:  currentProject.ProjectID,
        EntityID:   int(userID),
        EntityType: common.UserMember,
        Role:       models.PROJECTADMIN,
    }

    pmid, err := AddProjectMember(member)
    if err != nil {
        t.Errorf("Error occurred in UpdateProjectMember: %v", err)
    }

    UpdateProjectMemberRole(pmid, models.DEVELOPER)

    queryMember := models.Member{
        ProjectID:  currentProject.ProjectID,
        EntityID:   int(userID),
        EntityType: common.UserMember,
    }

    memberList, err := GetProjectMember(queryMember)
    if err != nil {
        t.Errorf("Error occurred in GetProjectMember: %v", err)
    }
    if len(memberList) != 1 {
        t.Errorf("Error occurred in Failed, size: %d, condition:%+v", len(memberList), queryMember)
    }
    memberItem := memberList[0]
    if memberItem.Role != models.DEVELOPER || memberItem.Entityname != user.Username {
        t.Errorf("member doesn't match!")
    }

}

func TestGetProjectMember(t *testing.T) {
    currentProject, err := dao.GetProjectByName("member_test_01")
    if err != nil {
        t.Errorf("Error occurred when GetProjectByName: %v", err)
    }
    var memberList1 = []*models.Member{
        &models.Member{
            ID:         346,
            Entityname: "admin",
            Rolename:   "projectAdmin",
            Role:       1,
            EntityID:   1,
            EntityType: "u"},
    }
    var memberList2 = []*models.Member{
        &models.Member{
            ID:         398,
            Entityname: "test_group_01",
            Rolename:   "projectAdmin",
            Role:       1,
            EntityType: "g"},
    }
    type args struct {
        queryMember models.Member
    }
    tests := []struct {
        name    string
        args    args
        want    []*models.Member
        wantErr bool
    }{
        {"Query default project member", args{models.Member{ProjectID: currentProject.ProjectID, Entityname: "admin"}}, memberList1, false},
        {"Query default project member group", args{models.Member{ProjectID: currentProject.ProjectID, Entityname: "test_group_01"}}, memberList2, false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := GetProjectMember(tt.args.queryMember)
            if (err != nil) != tt.wantErr {
                t.Errorf("GetProjectMember() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if len(got) != 1 {
                t.Errorf("Error occurred when query project member")
            }
            itemGot := got[0]
            itemWant := tt.want[0]

            if itemGot.Entityname != itemWant.Entityname || itemGot.Role != itemWant.Role || itemGot.EntityType != itemWant.EntityType {
                t.Errorf("test failed, got:%+v, want:%+v", itemGot, itemWant)
            }
        })
    }

}
@@ -391,6 +391,20 @@ func UpdateRepJobStatus(id int64, status string) error {
     return err
 }
+
+// SetRepJobUUID ...
+func SetRepJobUUID(id int64, uuid string) error {
+    o := GetOrmer()
+    j := models.RepJob{
+        ID:   id,
+        UUID: uuid,
+    }
+    n, err := o.Update(&j, "UUID")
+    if n == 0 {
+        log.Warningf("no records are updated when updating replication job %d", id)
+    }
+    return err
+}

 // ResetRunningJobs update all running jobs status to pending, including replication jobs and scan jobs.
 func ResetRunningJobs() error {
     o := GetOrmer()
@@ -84,6 +84,21 @@ func UpdateScanJobStatus(id int64, status string) error {
     return err
 }
+
+// SetScanJobUUID set UUID to the record so it associates with the job in job service.
+func SetScanJobUUID(id int64, uuid string) error {
+    o := GetOrmer()
+    sj := models.ScanJob{
+        ID:   id,
+        UUID: uuid,
+    }
+    n, err := o.Update(&sj, "UUID")
+    if n == 0 {
+        log.Warningf("no records are updated when updating scan job %d", id)
+    }
+    return err
+
+}

 func scanJobQs(limit ...int) orm.QuerySeter {
     o := GetOrmer()
     l := -1
|
|||||||
package dao
|
package dao
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
@ -103,3 +104,23 @@ func initDatabaseForTest(db *models.Database) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PrepareTestData -- Clean and Create data
|
||||||
|
func PrepareTestData(clearSqls []string, initSqls []string) {
|
||||||
|
o := GetOrmer()
|
||||||
|
|
||||||
|
for _, sql := range clearSqls {
|
||||||
|
fmt.Printf("Exec sql:%v\n", sql)
|
||||||
|
_, err := o.Raw(sql).Exec()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("failed to clear database, sql:%v, error: %v", sql, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, sql := range initSqls {
|
||||||
|
_, err := o.Raw(sql).Exec()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("failed to init database, sql:%v, error: %v", sql, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
src/common/job/client.go (new file)
@@ -0,0 +1,105 @@
package job

import (
    "bytes"
    "encoding/json"
    "io/ioutil"
    "net/http"
    "strings"

    commonhttp "github.com/vmware/harbor/src/common/http"
    "github.com/vmware/harbor/src/common/http/modifier/auth"
    "github.com/vmware/harbor/src/common/job/models"
)

// Client wraps interface to access jobservice.
type Client interface {
    SubmitJob(*models.JobData) (string, error)
    GetJobLog(uuid string) ([]byte, error)
    //TODO actions or stop?
}

// DefaultClient is the default implementation of Client interface
type DefaultClient struct {
    endpoint string
    client   *commonhttp.Client
}

// NewDefaultClient creates a default client based on endpoint and secret.
func NewDefaultClient(endpoint, secret string) *DefaultClient {
    var c *commonhttp.Client
    if len(secret) > 0 {
        c = commonhttp.NewClient(nil, auth.NewSecretAuthorizer(secret))
    } else {
        c = commonhttp.NewClient(nil)
    }
    e := strings.TrimRight(endpoint, "/")
    return &DefaultClient{
        endpoint: e,
        client:   c,
    }
}

//SubmitJob call jobserivce API to submit a job and returns the job's UUID.
func (d *DefaultClient) SubmitJob(jd *models.JobData) (string, error) {
    url := d.endpoint + "/api/v1/jobs"
    jq := models.JobRequest{
        Job: jd,
    }
    b, err := json.Marshal(jq)
    if err != nil {
        return "", err
    }
    req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b))
    if err != nil {
        return "", err
    }
    req.Header.Set("Content-Type", "application/json")
    resp, err := d.client.Do(req)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    data, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return "", err
    }
    if resp.StatusCode != http.StatusAccepted {
        return "", &commonhttp.Error{
            Code:    resp.StatusCode,
            Message: string(data),
        }
    }
    stats := &models.JobStats{}
    if err := json.Unmarshal(data, stats); err != nil {
        return "", err
    }
    return stats.Stats.JobID, nil
}

//GetJobLog call jobserivce API to get the log of a job. It only accepts the UUID of the job
func (d *DefaultClient) GetJobLog(uuid string) ([]byte, error) {
    url := d.endpoint + "/api/v1/jobs/" + uuid + "/log"
    req, err := http.NewRequest(http.MethodGet, url, nil)
    if err != nil {
        return nil, err
    }
    resp, err := d.client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    data, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    if resp.StatusCode != http.StatusOK {
        return nil, &commonhttp.Error{
            Code:    resp.StatusCode,
            Message: string(data),
        }
    }
    return data, nil
}

//TODO: builder, default client, etc.
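For orientation only (not part of this commit): a minimal sketch of how a caller on the UI side could use the client above together with SetScanJobUUID to trigger an image scan through the new job service and keep both identifiers, as the schema comment "New job service only records uuid, for compatibility in this table both IDs are stored" suggests. The triggerScan helper, its parameter list, and the way the endpoint and secret are supplied are assumptions made for illustration; the package paths, job name constant, kind, and parameter keys come from the files in this diff (the keys mirror the ScanJobParms json tags shown further below).

package example

import (
    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/job"
    jobmodels "github.com/vmware/harbor/src/common/job/models"
)

// triggerScan is a hypothetical helper: it submits an IMAGE_SCAN job and
// records the returned UUID next to the scan job's integer ID.
func triggerScan(endpoint, secret string, scanJobID int64,
    repository, tag, digest, registryURL, clairEndpoint, jobServiceSecret string) error {
    client := job.NewDefaultClient(endpoint, secret)
    data := &jobmodels.JobData{
        Name: job.ImageScanJob,
        Parameters: jobmodels.Parameters{
            // keys follow the json tags of ScanJobParms
            "job_int_id":         scanJobID,
            "repository":         repository,
            "tag":                tag,
            "digest":             digest,
            "registry_url":       registryURL,
            "clair_endpoint":     clairEndpoint,
            "job_service_secret": jobServiceSecret,
        },
        Metadata: &jobmodels.JobMetadata{
            JobKind:  job.GenericKind,
            IsUnique: false,
        },
    }
    uuid, err := client.SubmitJob(data)
    if err != nil {
        return err
    }
    // store the job service UUID in the new img_scan_job.job_uuid column
    return dao.SetScanJobUUID(scanJobID, uuid)
}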
@@ -3,6 +3,8 @@ package job
 const (
     //ImageScanJob is name of scan job it will be used as key to register to job service.
     ImageScanJob = "IMAGE_SCAN"
+    // GenericKind marks the job as a generic job, it will be contained in job metadata and passed to job service.
+    GenericKind = "Generic"
     // ImageTransfer : the name of image transfer job in job service
     ImageTransfer = "IMAGE_TRANSFER"
     // ImageDelete : the name of image delete job in job service
src/common/job/models/models.go (new file, 83 lines)
@@ -0,0 +1,83 @@
+// Copyright 2018 The Harbor Authors. All rights reserved.
+
+package models
+
+//Parameters for job execution.
+type Parameters map[string]interface{}
+
+//JobRequest is the request of launching a job.
+type JobRequest struct {
+	Job *JobData `json:"job"`
+}
+
+//JobData keeps the basic info.
+type JobData struct {
+	Name string `json:"name"`
+	Parameters Parameters `json:"parameters"`
+	Metadata *JobMetadata `json:"metadata"`
+	StatusHook string `json:"status_hook"`
+}
+
+//JobMetadata stores the metadata of job.
+type JobMetadata struct {
+	JobKind string `json:"kind"`
+	ScheduleDelay uint64 `json:"schedule_delay,omitempty"`
+	Cron string `json:"cron_spec,omitempty"`
+	IsUnique bool `json:"unique"`
+}
+
+//JobStats keeps the result of job launching.
+type JobStats struct {
+	Stats *JobStatData `json:"job"`
+}
+
+//JobStatData keeps the stats of job
+type JobStatData struct {
+	JobID string `json:"id"`
+	Status string `json:"status"`
+	JobName string `json:"name"`
+	JobKind string `json:"kind"`
+	IsUnique bool `json:"unique"`
+	RefLink string `json:"ref_link,omitempty"`
+	CronSpec string `json:"cron_spec,omitempty"`
+	EnqueueTime int64 `json:"enqueue_time"`
+	UpdateTime int64 `json:"update_time"`
+	RunAt int64 `json:"run_at,omitempty"`
+	CheckIn string `json:"check_in,omitempty"`
+	CheckInAt int64 `json:"check_in_at,omitempty"`
+	DieAt int64 `json:"die_at,omitempty"`
+	HookStatus string `json:"hook_status,omitempty"`
+}
+
+//JobPoolStats represents the healthy and status of all the running worker pools.
+type JobPoolStats struct {
+	Pools []*JobPoolStatsData `json:"worker_pools"`
+}
+
+//JobPoolStatsData represent the healthy and status of the worker pool.
+type JobPoolStatsData struct {
+	WorkerPoolID string `json:"worker_pool_id"`
+	StartedAt int64 `json:"started_at"`
+	HeartbeatAt int64 `json:"heartbeat_at"`
+	JobNames []string `json:"job_names"`
+	Concurrency uint `json:"concurrency"`
+	Status string `json:"status"`
+}
+
+//JobActionRequest defines for triggering job action like stop/cancel.
+type JobActionRequest struct {
+	Action string `json:"action"`
+}
+
+//JobStatusChange is designed for reporting the status change via hook.
+type JobStatusChange struct {
+	JobID string `json:"job_id"`
+	Status string `json:"status"`
+	CheckIn string `json:"check_in,omitempty"`
+}
+
+//Message is designed for sub/pub messages
+type Message struct {
+	Event string
+	Data interface{} //generic format
+}
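
As an aside, a minimal sketch of how a caller could assemble and serialize one of these request payloads; the job name, kind string, and parameter values below are illustrative placeholders, not taken from this commit:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/vmware/harbor/src/common/job/models"
)

func main() {
	// Metadata for a one-off, non-unique job ("Generic" is a placeholder kind string).
	meta := &models.JobMetadata{
		JobKind:  "Generic",
		IsUnique: false,
	}

	// Parameters is a free-form map that the job service hands to the job implementation.
	parms := models.Parameters{
		"repository": "library/nginx", // placeholder values
		"tag":        "latest",
	}

	// JobRequest wraps the JobData payload that is submitted to the job service.
	req := models.JobRequest{
		Job: &models.JobData{
			Name:       "IMAGE_SCAN", // placeholder job name
			Parameters: parms,
			Metadata:   meta,
		},
	}

	b, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
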
@@ -5,6 +5,7 @@ type ScanJobParms struct {
 	JobID int64 `json:"job_int_id"`
 	Repository string `json:"repository"`
 	Tag string `json:"tag"`
+	Digest string `json:"digest"`
 	Secret string `json:"job_service_secret"`
 	RegistryURL string `json:"registry_url"`
 	ClairEndpoint string `json:"clair_endpoint"`
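
For context, the typed parameters above travel to the job service through the generic Parameters map; a small sketch of that JSON round-trip in both directions (the package name and function names here are hypothetical; the UI side of this commit does the flattening in buildScanJobData further below):

package scanparams // hypothetical package, for illustration only

import (
	"encoding/json"

	"github.com/vmware/harbor/src/common/job"
)

// toParameters flattens ScanJobParms into the generic map the job service accepts.
func toParameters(p job.ScanJobParms) (map[string]interface{}, error) {
	b, err := json.Marshal(p)
	if err != nil {
		return nil, err
	}
	m := make(map[string]interface{})
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return m, nil
}

// fromParameters recovers the typed struct on the receiving (job) side.
func fromParameters(m map[string]interface{}) (*job.ScanJobParms, error) {
	b, err := json.Marshal(m)
	if err != nil {
		return nil, err
	}
	p := &job.ScanJobParms{}
	if err := json.Unmarshal(b, p); err != nil {
		return nil, err
	}
	return p, nil
}
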
@@ -34,5 +34,6 @@ func init() {
 		new(ProjectMetadata),
 		new(ConfigEntry),
 		new(Label),
-		new(ResourceLabel))
+		new(ResourceLabel),
+		new(UserGroup))
 }
@@ -32,3 +32,11 @@ type UserMember struct {
 	Rolename string `json:"role_name"`
 	Role int `json:"role_id"`
 }
+
+// MemberReq - Create Project Member Request
+type MemberReq struct {
+	ProjectID int64 `json:"project_id"`
+	Role int `json:"role_id,omitempty"`
+	MemberUser User `json:"member_user,omitempty"`
+	MemberGroup UserGroup `json:"member_group,omitempty"`
+}
@@ -61,6 +61,7 @@ type RepJob struct {
 	Operation string `orm:"column(operation)" json:"operation"`
 	Tags string `orm:"column(tags)" json:"-"`
 	TagList []string `orm:"-" json:"tags"`
+	UUID string `orm:"column(job_uuid)" json:"-"`
 	// Policy    RepPolicy `orm:"-" json:"policy"`
 	CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
 	UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
@@ -29,6 +29,7 @@ type ScanJob struct {
 	Repository string `orm:"column(repository)" json:"repository"`
 	Tag string `orm:"column(tag)" json:"tag"`
 	Digest string `orm:"column(digest)" json:"digest"`
+	UUID string `orm:"column(job_uuid)" json:"-"`
 	CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
 	UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
 }
@@ -11,4 +11,21 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package main
+package models
+
+// UserGroupTable is the name of table in DB that holds the user object
+const UserGroupTable = "user_group"
+
+// UserGroup ...
+type UserGroup struct {
+	ID int `orm:"pk;auto;column(id)" json:"id,omitempty"`
+	GroupName string `orm:"column(group_name)" json:"group_name,omitempty"`
+	GroupType int `orm:"column(group_type)" json:"group_type,omitempty"`
+	LdapGroupDN string `orm:"column(ldap_group_dn)" json:"ldap_group_dn,omitempty"`
+}
+
+// TableName ...
+func (u *UserGroup) TableName() string {
+	return UserGroupTable
+}
@@ -15,11 +15,9 @@
 package clair

 import (
-	"github.com/vmware/harbor/src/common"
 	"github.com/vmware/harbor/src/common/dao"
 	"github.com/vmware/harbor/src/common/models"
 	"github.com/vmware/harbor/src/common/utils/log"
-
 	"fmt"
 	"strings"
 )
@@ -44,7 +42,7 @@ func ParseClairSev(clairSev string) models.Severity {
 }

 // UpdateScanOverview queries the vulnerability based on the layerName and update the record in img_scan_overview table based on digest.
-func UpdateScanOverview(digest, layerName string, l ...*log.Logger) error {
+func UpdateScanOverview(digest, layerName string, clairEndpoint string, l ...*log.Logger) error {
 	var logger *log.Logger
 	if len(l) > 1 {
 		return fmt.Errorf("More than one logger specified")
@@ -53,7 +51,7 @@ func UpdateScanOverview(digest, layerName string, l ...*log.Logger) error {
 	} else {
 		logger = log.DefaultLogger()
 	}
-	client := NewClient(common.DefaultClairEndpoint, logger)
+	client := NewClient(clairEndpoint, logger)
 	res, err := client.GetResult(layerName)
 	if err != nil {
 		logger.Errorf("Failed to get result from Clair, error: %v", err)
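
A brief call sketch for the updated signature; the variadic tail still accepts at most one logger (a second one makes the function return an error), and the import path plus the placeholder values here are assumptions for illustration:

package main

import (
	"github.com/vmware/harbor/src/common/utils/clair" // assumed path of this package
	"github.com/vmware/harbor/src/common/utils/log"
)

func main() {
	digest := "sha256:0000000000000000000000000000000000000000000000000000000000000000" // placeholder
	layerName := "example-top-layer"                                                     // placeholder
	endpoint := "http://clair:6060"                                                      // placeholder; real callers pass config.ClairEndpoint()

	// The trailing logger is optional; omitting it falls back to log.DefaultLogger().
	if err := clair.UpdateScanOverview(digest, layerName, endpoint); err != nil {
		log.Errorf("Failed to update scan overview for %s: %v", digest, err)
	}
}
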
@@ -76,6 +76,7 @@ var adminServerDefaultConfig = map[string]interface{}{
 	common.UIURL: "http://myui:8888/",
 	common.JobServiceURL: "http://myjob:8888/",
 	common.ReadOnly: false,
+	common.NotaryURL: "http://notary-server:4443",
 }

 // NewAdminserver returns a mock admin server
@@ -46,7 +46,7 @@ func Init() error {

 	adminServerURL := os.Getenv("ADMINSERVER_URL")
 	if len(adminServerURL) == 0 {
-		adminServerURL = "http://adminserver"
+		adminServerURL = common.DefaultAdminserverEndpoint
 	}
 	log.Infof("initializing client for adminserver %s ...", adminServerURL)
 	cfg := &client.Config{
@@ -112,7 +112,7 @@ func LocalUIURL() string {
 	cfg, err := mg.Get()
 	if err != nil {
 		log.Warningf("Failed to Get job service UI URL from backend, error: %v, will return default value.")
-		return "http://ui"
+		return common.DefaultUIEndpoint
 	}
 	return strings.TrimSuffix(cfg[common.UIURL].(string), "/")

@@ -169,5 +169,12 @@ func InternalTokenServiceEndpoint() string {

 // ClairEndpoint returns the end point of clair instance, by default it's the one deployed within Harbor.
 func ClairEndpoint() string {
-	return common.DefaultClairEndpoint
+	cfg, err := mg.Get()
+	if err != nil {
+		return common.DefaultClairEndpoint
+	}
+	if cfg[common.ClairURL] == nil {
+		return common.DefaultClairEndpoint
+	}
+	return cfg[common.ClairURL].(string)
 }
@@ -134,7 +134,8 @@ func (sh *SummarizeHandler) Enter() (string, error) {
 	logger.Infof("Entered summarize handler")
 	layerName := sh.Context.layers[len(sh.Context.layers)-1].Name
 	logger.Infof("Top layer's name: %s, will use it to get the vulnerability result of image", layerName)
-	if err := clair.UpdateScanOverview(sh.Context.Digest, layerName); err != nil {
+	clairURL := config.ClairEndpoint()
+	if err := clair.UpdateScanOverview(sh.Context.Digest, layerName, clairURL); err != nil {
 		return "", err
 	}
 	return models.JobFinished, nil
@@ -28,7 +28,7 @@ const (
 	jobServiceLoggerBasePath = "JOB_SERVICE_LOGGER_BASE_PATH"
 	jobServiceLoggerLevel = "JOB_SERVICE_LOGGER_LEVEL"
 	jobServiceLoggerArchivePeriod = "JOB_SERVICE_LOGGER_ARCHIVE_PERIOD"
-	jobServiceAdminServerEndpoint = "JOB_SERVICE_ADMIN_SERVER_ENDPOINT"
+	jobServiceAdminServerEndpoint = "ADMINSERVER_URL"
 	jobServiceAuthSecret = "JOBSERVICE_SECRET"

 	//JobServiceProtocolHTTPS points to the 'https' protocol
@@ -64,7 +64,7 @@ func (cj *ClairJob) Run(ctx env.JobContext, params map[string]interface{}) error
 	if err != nil {
 		return err
 	}
-	imgDigest, _, payload, err := repoClient.PullManifest(jobParms.Tag, []string{schema2.MediaTypeManifest})
+	_, _, payload, err := repoClient.PullManifest(jobParms.Tag, []string{schema2.MediaTypeManifest})
 	if err != nil {
 		logger.Errorf("Error pulling manifest for image %s:%s :%v", jobParms.Repository, jobParms.Tag, err)
 		return err
@@ -96,7 +96,7 @@ func (cj *ClairJob) Run(ctx env.JobContext, params map[string]interface{}) error
 		return err
 	}
 	compOverview, sev := clair.TransformVuln(res)
-	err = dao.UpdateImgScanOverview(imgDigest, layerName, sev, compOverview)
+	err = dao.UpdateImgScanOverview(jobParms.Digest, layerName, sev, compOverview)
 	return err
 }

@@ -66,7 +66,7 @@ func Init() error {
 	initKeyProvider()
 	adminServerURL := os.Getenv("ADMINSERVER_URL")
 	if len(adminServerURL) == 0 {
-		adminServerURL = "http://adminserver"
+		adminServerURL = common.DefaultAdminserverEndpoint
 	}

 	return InitByURL(adminServerURL)
@@ -295,19 +295,18 @@ func InternalJobServiceURL() string {
 	cfg, err := mg.Get()
 	if err != nil {
 		log.Warningf("Failed to Get job service URL from backend, error: %v, will return default value.")
-		return "http://jobservice"
+		return common.DefaultJobserviceEndpoint
 	}

 	if cfg[common.JobServiceURL] == nil {
-		return "http://jobservice"
+		return common.DefaultJobserviceEndpoint
 	}
 	return strings.TrimSuffix(cfg[common.JobServiceURL].(string), "/")
 }

 // InternalTokenServiceEndpoint returns token service endpoint for internal communication between Harbor containers
 func InternalTokenServiceEndpoint() string {
-	uiURL := "http://ui"
+	uiURL := common.DefaultUIEndpoint
 	cfg, err := mg.Get()
 	if err != nil {
 		log.Warningf("Failed to Get job service UI URL from backend, error: %v, will use default value.")
@@ -321,7 +320,15 @@ func InternalTokenServiceEndpoint() string {
 // InternalNotaryEndpoint returns notary server endpoint for internal communication between Harbor containers
 // This is currently a conventional value and can be inaccessible when Harbor is not deployed with Notary.
 func InternalNotaryEndpoint() string {
-	return "http://notary-server:4443"
+	cfg, err := mg.Get()
+	if err != nil {
+		log.Warningf("Failed to get Notary endpoint from backend, error: %v, will use default value.")
+		return common.DefaultNotaryEndpoint
+	}
+	if cfg[common.NotaryURL] == nil {
+		return common.DefaultNotaryEndpoint
+	}
+	return cfg[common.NotaryURL].(string)
 }

 // InitialAdminPassword returns the initial password for administrator
@@ -401,7 +408,7 @@ func JobserviceSecret() string {
 func WithNotary() bool {
 	cfg, err := mg.Get()
 	if err != nil {
-		log.Errorf("Failed to get configuration, will return WithNotary == false")
+		log.Warningf("Failed to get configuration, will return WithNotary == false")
 		return false
 	}
 	return cfg[common.WithNotary].(bool)
@@ -419,7 +426,12 @@ func WithClair() bool {

 // ClairEndpoint returns the end point of clair instance, by default it's the one deployed within Harbor.
 func ClairEndpoint() string {
-	return common.DefaultClairEndpoint
+	cfg, err := mg.Get()
+	if err != nil {
+		log.Errorf("Failed to get configuration, use default clair endpoint")
+		return common.DefaultClairEndpoint
+	}
+	return cfg[common.ClairURL].(string)
 }

 // ClairDB return Clair db info
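
Note that, unlike the jobservice-side ClairEndpoint shown earlier in this commit, this variant type-asserts cfg[common.ClairURL] without a nil check. Purely as an illustrative sketch (not part of the patch), a drop-in defensive form within the same package scope could mirror that check:

// ClairEndpoint, defensive sketch: fall back to the default when the key is
// missing rather than type-asserting a possible nil value.
func ClairEndpoint() string {
	cfg, err := mg.Get()
	if err != nil {
		log.Errorf("Failed to get configuration, use default clair endpoint")
		return common.DefaultClairEndpoint
	}
	if cfg[common.ClairURL] == nil {
		return common.DefaultClairEndpoint
	}
	return cfg[common.ClairURL].(string)
}
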
@@ -35,7 +35,7 @@ const (
 var rec *httptest.ResponseRecorder

 // NotaryEndpoint , exported for testing.
-var NotaryEndpoint = config.InternalNotaryEndpoint()
+var NotaryEndpoint = ""

 // MatchPullManifest checks if the request looks like a request to pull manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values
 func MatchPullManifest(req *http.Request) (bool, string, string) {
@@ -294,6 +294,9 @@ func (vh vulnerableHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request)
 }

 func matchNotaryDigest(img imageInfo) (bool, error) {
+	if NotaryEndpoint == "" {
+		NotaryEndpoint = config.InternalNotaryEndpoint()
+	}
 	targets, err := notary.GetInternalTargets(NotaryEndpoint, tokenUsername, img.repository)
 	if err != nil {
 		return false, err
@@ -32,7 +32,7 @@ const (
 )

 var (
-	clairClient = clair.NewClient(config.ClairEndpoint(), nil)
+	clairClient *clair.Client
 )

 // Handler handles request on /service/notifications/clair/, which listens to clair's notifications.
@@ -43,7 +43,10 @@ type Handler struct {

 // Handle ...
 func (h *Handler) Handle() {
-	var ne models.ClairNotificationEnvelope
+	if clairClient == nil {
+		clairClient = clair.NewClient(config.ClairEndpoint(), nil)
+	}
+	var ne models.ClairNotificationEnvelope
 	if err := json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &ne); err != nil {
 		log.Errorf("Failed to decode the request: %v", err)
 		return
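
The lazy initialization above checks a package-level variable on every request without synchronization; as an alternative sketch only (sync.Once and the helper name are not part of the patch), the client could be created exactly once:

// Hypothetical alternative, assuming the same package-level clairClient variable
// and a "sync" import.
var clairClientOnce sync.Once

func getClairClient() *clair.Client {
	clairClientOnce.Do(func() {
		clairClient = clair.NewClient(config.ClairEndpoint(), nil)
	})
	return clairClient
}
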
@@ -85,7 +88,7 @@ func (h *Handler) Handle() {
 		return
 	}
 	for _, e := range l {
-		if err := clair.UpdateScanOverview(e.Digest, e.DetailsKey); err != nil {
+		if err := clair.UpdateScanOverview(e.Digest, e.DetailsKey, config.ClairEndpoint()); err != nil {
 			log.Errorf("Failed to refresh scan overview for image: %s", e.Digest)
 		} else {
 			log.Debugf("Refreshed scan overview for record with digest: %s", e.Digest)
src/ui/utils/job.go (new file, 179 lines)
@@ -0,0 +1,179 @@
+// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package utils contains methods to support security, cache, and webhook functions.
+package utils
+
+import (
+	"github.com/vmware/harbor/src/common/dao"
+	"github.com/vmware/harbor/src/common/job"
+	jobmodels "github.com/vmware/harbor/src/common/job/models"
+	"github.com/vmware/harbor/src/common/models"
+	"github.com/vmware/harbor/src/common/utils/log"
+	"github.com/vmware/harbor/src/common/utils/registry"
+	"github.com/vmware/harbor/src/ui/config"
+
+	"encoding/json"
+	"sync"
+)
+
+var (
+	cl sync.Mutex
+	jobServiceClient job.Client
+)
+
+// ScanAllImages scans all images of Harbor by submitting jobs to jobservice, the whole process will move on if failed to submit any job of a single image.
+func ScanAllImages() error {
+	repos, err := dao.GetRepositories()
+	if err != nil {
+		log.Errorf("Failed to list all repositories, error: %v", err)
+		return err
+	}
+	log.Infof("Scanning all images on Harbor.")
+
+	go scanRepos(repos)
+	return nil
+}
+
+// ScanImagesByProjectID scans all images under a project, the whole process will move on if failed to submit any job of a single image.
+func ScanImagesByProjectID(id int64) error {
+	repos, err := dao.GetRepositories(&models.RepositoryQuery{
+		ProjectIDs: []int64{id},
+	})
+	if err != nil {
+		log.Errorf("Failed list repositories in project %d, error: %v", id, err)
+		return err
+	}
+	log.Infof("Scanning all images in project: %d ", id)
+	go scanRepos(repos)
+	return nil
+}
+
+func scanRepos(repos []*models.RepoRecord) {
+	var repoClient *registry.Repository
+	var err error
+	var tags []string
+	for _, r := range repos {
+		repoClient, err = NewRepositoryClientForUI("harbor-ui", r.Name)
+		if err != nil {
+			log.Errorf("Failed to initialize client for repository: %s, error: %v, skip scanning", r.Name, err)
+			continue
+		}
+		tags, err = repoClient.ListTag()
+		if err != nil {
+			log.Errorf("Failed to get tags for repository: %s, error: %v, skip scanning.", r.Name, err)
+			continue
+		}
+		for _, t := range tags {
+			if err = TriggerImageScan(r.Name, t); err != nil {
+				log.Errorf("Failed to scan image with repository: %s, tag: %s, error: %v.", r.Name, t, err)
+			} else {
+				log.Debugf("Triggered scan for image with repository: %s, tag: %s", r.Name, t)
+			}
+		}
+	}
+}
+
+// GetJobServiceClient returns the job service client instance.
+func GetJobServiceClient() job.Client {
+	cl.Lock()
+	defer cl.Unlock()
+	if jobServiceClient == nil {
+		jobServiceClient = job.NewDefaultClient(config.InternalJobServiceURL(), config.UISecret())
+	}
+	return jobServiceClient
+}
+
+// TriggerImageScan triggers an image scan job on jobservice.
+func TriggerImageScan(repository string, tag string) error {
+	repoClient, err := NewRepositoryClientForUI("harbor-ui", repository)
+	if err != nil {
+		return err
+	}
+	digest, _, err := repoClient.ManifestExist(tag)
+	if err != nil {
+		log.Errorf("Failed to get Manifest for %s:%s", repository, tag)
+		return err
+	}
+	return triggerImageScan(repository, tag, digest, GetJobServiceClient())
+}
+
+func triggerImageScan(repository, tag, digest string, client job.Client) error {
+	id, err := dao.AddScanJob(models.ScanJob{
+		Repository: repository,
+		Digest: digest,
+		Tag: tag,
+		Status: models.JobPending,
+	})
+	if err != nil {
+		return err
+	}
+	err = dao.SetScanJobForImg(digest, id)
+	if err != nil {
+		return err
+	}
+	data, err := buildScanJobData(id, repository, tag, digest)
+	if err != nil {
+		return err
+	}
+	uuid, err := client.SubmitJob(data)
+	if err != nil {
+		return err
+	}
+	err = dao.SetScanJobUUID(id, uuid)
+	if err != nil {
+		log.Warningf("Failed to set UUID for scan job, ID: %d, repository: %s, tag: %s")
+	}
+	return nil
+}
+
+func buildScanJobData(jobID int64, repository, tag, digest string) (*jobmodels.JobData, error) {
+	regURL, err := config.RegistryURL()
+	if err != nil {
+		return nil, err
+	}
+	// TODO:job service can get some parms from context.
+	parms := job.ScanJobParms{
+		ClairEndpoint: config.ClairEndpoint(),
+		JobID: jobID,
+		RegistryURL: regURL,
+		Repository: repository,
+		Secret: config.JobserviceSecret(),
+		Digest: digest,
+		Tag: tag,
+		TokenEndpoint: config.InternalTokenServiceEndpoint(),
+	}
+	parmsMap := make(map[string]interface{})
+	b, err := json.Marshal(parms)
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(b, &parmsMap)
+	if err != nil {
+		return nil, err
+	}
+	meta := jobmodels.JobMetadata{
+		JobKind: job.GenericKind,
+		IsUnique: false,
+	}
+
+	data := &jobmodels.JobData{
+		Name: job.ImageScanJob,
+		Parameters: jobmodels.Parameters(parmsMap),
+		Metadata: &meta,
+		StatusHook: "",
+	}
+
+	return data, nil
+}
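
For reference, a rough usage sketch of the new helpers from calling code; the repository name, tag, and project ID are placeholders, and the snippet assumes the UI configuration and database layer have already been initialized:

package main

import (
	"fmt"

	"github.com/vmware/harbor/src/ui/utils"
)

func main() {
	// Scan a single image by repository and tag.
	if err := utils.TriggerImageScan("library/nginx", "latest"); err != nil {
		fmt.Printf("failed to trigger scan: %v\n", err)
	}

	// Scan every image under a project; submission happens asynchronously,
	// so a nil error only means the scan loop was kicked off.
	if err := utils.ScanImagesByProjectID(1); err != nil {
		fmt.Printf("failed to scan project: %v\n", err)
	}
}
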
@@ -16,73 +16,16 @@
 package utils

 import (
-	"github.com/vmware/harbor/src/common/dao"
 	"github.com/vmware/harbor/src/common/models"
-	"github.com/vmware/harbor/src/common/utils/log"
 	"github.com/vmware/harbor/src/common/utils/registry"
 	"github.com/vmware/harbor/src/common/utils/registry/auth"
 	"github.com/vmware/harbor/src/ui/config"
 	"github.com/vmware/harbor/src/ui/service/token"

-	"bytes"
-	"encoding/json"
-	"fmt"
 	"io"
 	"net/http"
 )

-// ScanAllImages scans all images of Harbor by submiting jobs to jobservice, the whole process will move on if failed to submit any job of a single image.
-func ScanAllImages() error {
-	repos, err := dao.GetRepositories()
-	if err != nil {
-		log.Errorf("Failed to list all repositories, error: %v", err)
-		return err
-	}
-	log.Infof("Scanning all images on Harbor.")
-
-	go scanRepos(repos)
-	return nil
-}
-
-// ScanImagesByProjectID scans all images under a projet, the whole process will move on if failed to submit any job of a single image.
-func ScanImagesByProjectID(id int64) error {
-	repos, err := dao.GetRepositories(&models.RepositoryQuery{
-		ProjectIDs: []int64{id},
-	})
-	if err != nil {
-		log.Errorf("Failed list repositories in project %d, error: %v", id, err)
-		return err
-	}
-	log.Infof("Scanning all images in project: %d ", id)
-	go scanRepos(repos)
-	return nil
-}
-
-func scanRepos(repos []*models.RepoRecord) {
-	var repoClient *registry.Repository
-	var err error
-	var tags []string
-	for _, r := range repos {
-		repoClient, err = NewRepositoryClientForUI("harbor-ui", r.Name)
-		if err != nil {
-			log.Errorf("Failed to initialize client for repository: %s, error: %v, skip scanning", r.Name, err)
-			continue
-		}
-		tags, err = repoClient.ListTag()
-		if err != nil {
-			log.Errorf("Failed to get tags for repository: %s, error: %v, skip scanning.", r.Name, err)
-			continue
-		}
-		for _, t := range tags {
-			if err = TriggerImageScan(r.Name, t); err != nil {
-				log.Errorf("Failed to scan image with repository: %s, tag: %s, error: %v.", r.Name, t, err)
-			} else {
-				log.Debugf("Triggered scan for image with repository: %s, tag: %s", r.Name, t)
-			}
-		}
-	}
-}
-
 // RequestAsUI is a shortcut to make a request attach UI secret and send the request.
 // Do not use this when you want to handle the response
 func RequestAsUI(method, url string, body io.Reader, h ResponseHandler) error {
@@ -110,20 +53,6 @@ func AddUISecret(req *http.Request) {
 	}
 }

-// TriggerImageScan triggers an image scan job on jobservice.
-func TriggerImageScan(repository string, tag string) error {
-	data := &models.ImageScanReq{
-		Repo: repository,
-		Tag: tag,
-	}
-	b, err := json.Marshal(&data)
-	if err != nil {
-		return err
-	}
-	url := fmt.Sprintf("%s/api/jobs/scan", config.InternalJobServiceURL())
-	return RequestAsUI("POST", url, bytes.NewBuffer(b), NewStatusRespHandler(http.StatusOK))
-}
-
 // NewRepositoryClientForUI creates a repository client that can only be used to
 // access the internal registry
 func NewRepositoryClientForUI(username, repository string) (*registry.Repository, error) {
@@ -72,3 +72,4 @@ Changelog for harbor database schema
 - create table `harbor_resource_label`
 - create table `user_group`
 - modify table `project_member` use `id` as PK and add column `entity_type` to indicate if the member is user or group.
+- add `job_uuid` column to `replication_job` and `img_scan_job`