Merge branch 'master' into add-loading

commit 12737bfa7b by Fangyuan Cheng, 2019-04-28 22:23:38 +08:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
428 changed files with 19733 additions and 11438 deletions

src/Gopkg.lock (generated), 113 changed lines
View File

@ -25,14 +25,6 @@
revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
version = "v1.4.2"
[[projects]]
digest = "1:9e9193aa51197513b3abcb108970d831fbcf40ef96aa845c4f03276e1fa316d2"
name = "github.com/Sirupsen/logrus"
packages = ["."]
pruneopts = "UT"
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
version = "v1.0.5"
[[projects]]
digest = "1:e8078e5f9d84e87745efb3c0961e78045500cda10d7102fdf839fbac4b49a423"
name = "github.com/Unknwon/goconfig"
@ -141,14 +133,15 @@
version = "v3.0.0"
[[projects]]
digest = "1:5a39bab16f84dd753a3af60076a915b55584cc6df3b3dfba53bfd48bf4420e77"
digest = "1:d06c54bbda3a04ec18a2fa0577896b3c40f13409639b442379ee0a5a53be8259"
name = "github.com/docker/distribution"
packages = [
".",
"context",
"digest",
"digestset",
"health",
"manifest",
"manifest/manifestlist",
"manifest/schema1",
"manifest/schema2",
"reference",
@ -159,8 +152,8 @@
"uuid",
]
pruneopts = "UT"
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
version = "v2.6.2"
revision = "2461543d988979529609e8cb6fca9ca190dc48da"
version = "v2.7.1"
[[projects]]
branch = "master"
@ -178,27 +171,6 @@
pruneopts = "UT"
revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
[[projects]]
digest = "1:fea1a444386b05e00dfcf2cb9a95fb09e11f1689056161471229baa4a7a65459"
name = "github.com/docker/notary"
packages = [
".",
"client",
"client/changelist",
"cryptoservice",
"storage",
"trustmanager",
"trustmanager/yubikey",
"trustpinning",
"tuf",
"tuf/data",
"tuf/signed",
"tuf/utils",
"tuf/validation",
]
pruneopts = "UT"
revision = "c04e3e6d05881045def11167c51d4a8baa34899a"
[[projects]]
digest = "1:0594af97b2f4cec6554086eeace6597e20a4b69466eb4ada25adf9f4300dddd2"
name = "github.com/garyburd/redigo"
@ -354,6 +326,14 @@
revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29"
version = "v1.1.6"
[[projects]]
digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
pruneopts = "UT"
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
version = "v1.0.2"
[[projects]]
branch = "master"
digest = "1:bd26bbaf1e9f9dfe829a88f87a0849b56f717c31785443a67668f2c752fa8412"
@ -396,6 +376,17 @@
revision = "aa2ec055abd10d26d539eb630a92241b781ce4bc"
version = "v1.0.0-rc0"
[[projects]]
digest = "1:11db38d694c130c800d0aefb502fb02519e514dc53d9804ce51d1ad25ec27db6"
name = "github.com/opencontainers/image-spec"
packages = [
"specs-go",
"specs-go/v1",
]
pruneopts = "UT"
revision = "d60099175f88c47cd379c4738d158884749ed235"
version = "v1.0.1"
[[projects]]
digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
name = "github.com/pkg/errors"
@ -431,6 +422,14 @@
revision = "b024fc5ea0e34bc3f83d9941c8d60b0622bfaca4"
version = "v1"
[[projects]]
digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = "UT"
revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f"
version = "v1.4.1"
[[projects]]
digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
name = "github.com/spf13/pflag"
@ -440,16 +439,47 @@
version = "v1.0.1"
[[projects]]
digest = "1:994df93785d966f82180e17a0857fa53f7155cddca3898ad00b27e8d4481e4ae"
digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
name = "github.com/stretchr/objx"
packages = ["."]
pruneopts = "UT"
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
version = "v0.1.1"
[[projects]]
digest = "1:288e2ba4192b77ec619875ab54d82e2179ca8978e8baa690dcb4343a4a1f4da7"
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock",
"require",
"suite",
]
pruneopts = "UT"
revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
version = "v1.2.0"
revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
version = "v1.3.0"
[[projects]]
digest = "1:a5702d6fd0891671faf050c05451d3ee4cfd70cb958e11556fefaca628ce832e"
name = "github.com/theupdateframework/notary"
packages = [
".",
"client",
"client/changelist",
"cryptoservice",
"storage",
"trustmanager",
"trustmanager/yubikey",
"trustpinning",
"tuf",
"tuf/data",
"tuf/signed",
"tuf/utils",
"tuf/validation",
]
pruneopts = "UT"
revision = "d6e1431feb32348e0650bf7551ac5cffd01d857b"
version = "v0.6.1"
[[projects]]
digest = "1:ab3259b9f5008a18ff8c1cc34623eccce354f3a9faf5b409983cd6717d64b40b"
@ -722,18 +752,14 @@
"github.com/dghubble/sling",
"github.com/dgrijalva/jwt-go",
"github.com/docker/distribution",
"github.com/docker/distribution/digest",
"github.com/docker/distribution/health",
"github.com/docker/distribution/manifest/manifestlist",
"github.com/docker/distribution/manifest/schema1",
"github.com/docker/distribution/manifest/schema2",
"github.com/docker/distribution/reference",
"github.com/docker/distribution/registry/auth/token",
"github.com/docker/distribution/registry/client/auth/challenge",
"github.com/docker/libtrust",
"github.com/docker/notary",
"github.com/docker/notary/client",
"github.com/docker/notary/trustpinning",
"github.com/docker/notary/tuf/data",
"github.com/garyburd/redigo/redis",
"github.com/ghodss/yaml",
"github.com/go-sql-driver/mysql",
@ -749,8 +775,13 @@
"github.com/pkg/errors",
"github.com/robfig/cron",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/mock",
"github.com/stretchr/testify/require",
"github.com/stretchr/testify/suite",
"github.com/theupdateframework/notary",
"github.com/theupdateframework/notary/client",
"github.com/theupdateframework/notary/trustpinning",
"github.com/theupdateframework/notary/tuf/data",
"golang.org/x/crypto/pbkdf2",
"golang.org/x/oauth2",
"golang.org/x/oauth2/clientcredentials",

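Note on the lock changes above: the capitalized github.com/Sirupsen/logrus entry is dropped in favor of github.com/sirupsen/logrus, and notary moves from github.com/docker/notary to github.com/theupdateframework/notary. Code importing the old paths has to switch; a minimal, purely illustrative Go sketch (not taken from the Harbor sources):

package main

import (
	log "github.com/sirupsen/logrus" // previously github.com/Sirupsen/logrus
)

func main() {
	// The logrus API is unchanged; only the import path casing differs.
	log.WithField("component", "example").Info("logging via the lower-case logrus path")
}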
View File

@ -42,7 +42,7 @@ ignored = ["github.com/goharbor/harbor/tests*"]
[[constraint]]
name = "github.com/docker/distribution"
version = "=2.6.2"
version = "=2.7.1"
[[constraint]]
branch = "master"
@ -52,7 +52,7 @@ ignored = ["github.com/goharbor/harbor/tests*"]
name = "github.com/go-sql-driver/mysql"
version = "=1.3.0"
[[constraint]]
[[override]]
name = "github.com/mattn/go-sqlite3"
version = "=1.6.0"
@ -66,7 +66,7 @@ ignored = ["github.com/goharbor/harbor/tests*"]
[[constraint]]
name = "github.com/stretchr/testify"
version = "=1.2.0"
version = "=1.3.0"
[[constraint]]
name = "github.com/gorilla/handlers"
@ -126,8 +126,12 @@ ignored = ["github.com/goharbor/harbor/tests*"]
[[constraint]]
name = "github.com/bmatcuk/doublestar"
version = "1.1.1"
version = "=1.1.1"
[[constraint]]
name = "github.com/pkg/errors"
version = "=0.8.1"
[[constraint]]
name = "github.com/docker/notary"
version = "=0.6.1"

View File

@ -65,7 +65,7 @@ func (c *Controller) getIndexYaml(namespaces []string) (*helm_repo.IndexFile, er
// Retrieve index.yaml for repositories
workerPool := make(chan struct{}, initialItemCount)
// Add initial tokens to the pool
for i := 0; i < initialItemCount; i++ {
workerPool <- struct{}{}
}
@ -103,7 +103,7 @@ LOOP:
go func(ns string) {
defer func() {
waitGroup.Done() // done
// Return the worker back to the pool
workerPool <- struct{}{}
}()

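The getIndexYaml hunk above caps concurrent index.yaml fetches with a buffered channel used as a token pool: a token is taken before each goroutine starts and returned when it finishes. A minimal standalone sketch of that pattern, with illustrative names and a print in place of the real fetch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const initialItemCount = 3
	namespaces := []string{"ns1", "ns2", "ns3", "ns4", "ns5"}

	// Buffered channel acting as a token pool that limits concurrency.
	workerPool := make(chan struct{}, initialItemCount)
	for i := 0; i < initialItemCount; i++ {
		workerPool <- struct{}{}
	}

	var waitGroup sync.WaitGroup
	for _, ns := range namespaces {
		<-workerPool // acquire a token before starting the work
		waitGroup.Add(1)
		go func(ns string) {
			defer func() {
				waitGroup.Done()
				workerPool <- struct{}{} // return the token to the pool
			}()
			fmt.Println("fetching index.yaml for", ns)
		}(ns)
	}
	waitGroup.Wait()
}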
View File

@ -15,16 +15,17 @@
package metadata
import (
"github.com/goharbor/harbor/src/common"
"testing"
)
func TestCfgMetaData_InitFromArray(t *testing.T) {
testArray := []Item{
{Scope: SystemScope, Group: BasicGroup, EnvKey: "HARBOR_ADMIN_PASSWORD", DefaultValue: "", Name: "admin_initial_password", ItemType: &PasswordType{}, Editable: true},
{Scope: SystemScope, Group: BasicGroup, EnvKey: "ADMIRAL_URL", DefaultValue: "NA", Name: "admiral_url", ItemType: &StringType{}, Editable: false},
{Scope: UserScope, Group: BasicGroup, EnvKey: "AUTH_MODE", DefaultValue: "db_auth", Name: "auth_mode", ItemType: &StringType{}, Editable: false},
{Scope: SystemScope, Group: BasicGroup, EnvKey: "CFG_EXPIRATION", DefaultValue: "5", Name: "cfg_expiration", ItemType: &StringType{}, Editable: false},
{Scope: SystemScope, Group: BasicGroup, EnvKey: "CHART_REPOSITORY_URL", DefaultValue: "http://chartmuseum:9999", Name: "chart_repository_url", ItemType: &StringType{}, Editable: false},
{Scope: SystemScope, Group: BasicGroup, EnvKey: "HARBOR_ADMIN_PASSWORD", DefaultValue: "", Name: common.AdminInitialPassword, ItemType: &PasswordType{}, Editable: true},
{Scope: SystemScope, Group: BasicGroup, EnvKey: "ADMIRAL_URL", DefaultValue: "NA", Name: common.AdmiralEndpoint, ItemType: &StringType{}, Editable: false},
{Scope: UserScope, Group: BasicGroup, EnvKey: "AUTH_MODE", DefaultValue: "db_auth", Name: common.AUTHMode, ItemType: &StringType{}, Editable: false},
{Scope: SystemScope, Group: BasicGroup, EnvKey: "CFG_EXPIRATION", DefaultValue: "5", Name: common.CfgExpiration, ItemType: &StringType{}, Editable: false},
{Scope: SystemScope, Group: BasicGroup, EnvKey: "CHART_REPOSITORY_URL", DefaultValue: "http://chartmuseum:9999", Name: common.ChartRepoURL, ItemType: &StringType{}, Editable: false},
}
curInst := Instance()
curInst.initFromArray(testArray)
@ -32,11 +33,11 @@ func TestCfgMetaData_InitFromArray(t *testing.T) {
if len(metaDataInstance.metaMap) != 5 {
t.Errorf("Cannot initialize metadata, size %v", len(metaDataInstance.metaMap))
}
item, ok := curInst.GetByName("admin_initial_password")
item, ok := curInst.GetByName(common.AdminInitialPassword)
if ok == false {
t.Errorf("Can not get admin_initial_password metadata")
}
if item.Name != "admin_initial_password" {
if item.Name != common.AdminInitialPassword {
t.Errorf("Can not get admin_initial_password metadata")
}

View File

@ -59,77 +59,77 @@ var (
// 2. Get/Set config settings by CfgManager
// 3. CfgManager.Load()/CfgManager.Save() to load/save from the configuration storage.
ConfigList = []Item{
// TODO: All these Name should be reference to const, see #7040
{Name: "admin_initial_password", Scope: SystemScope, Group: BasicGroup, EnvKey: "HARBOR_ADMIN_PASSWORD", DefaultValue: "", ItemType: &PasswordType{}, Editable: true},
{Name: "admiral_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "ADMIRAL_URL", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "auth_mode", Scope: UserScope, Group: BasicGroup, EnvKey: "AUTH_MODE", DefaultValue: "db_auth", ItemType: &AuthModeType{}, Editable: false},
{Name: "cfg_expiration", Scope: SystemScope, Group: BasicGroup, EnvKey: "CFG_EXPIRATION", DefaultValue: "5", ItemType: &IntType{}, Editable: false},
{Name: "chart_repository_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "CHART_REPOSITORY_URL", DefaultValue: "http://chartmuseum:9999", ItemType: &StringType{}, Editable: false},
{Name: "clair_db", Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: "clair_db_host", Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_HOST", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: "clair_db_password", Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_PASSWORD", DefaultValue: "root123", ItemType: &PasswordType{}, Editable: false},
{Name: "clair_db_port", Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: "clair_db_sslmode", Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: "clair_db_username", Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: "clair_url", Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_URL", DefaultValue: "http://clair:6060", ItemType: &StringType{}, Editable: false},
{Name: common.AdminInitialPassword, Scope: SystemScope, Group: BasicGroup, EnvKey: "HARBOR_ADMIN_PASSWORD", DefaultValue: "", ItemType: &PasswordType{}, Editable: true},
{Name: common.AdmiralEndpoint, Scope: SystemScope, Group: BasicGroup, EnvKey: "ADMIRAL_URL", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.AUTHMode, Scope: UserScope, Group: BasicGroup, EnvKey: "AUTH_MODE", DefaultValue: "db_auth", ItemType: &AuthModeType{}, Editable: false},
{Name: common.CfgExpiration, Scope: SystemScope, Group: BasicGroup, EnvKey: "CFG_EXPIRATION", DefaultValue: "5", ItemType: &IntType{}, Editable: false},
{Name: common.ChartRepoURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CHART_REPOSITORY_URL", DefaultValue: "http://chartmuseum:9999", ItemType: &StringType{}, Editable: false},
{Name: "core_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_URL", DefaultValue: "http://core:8080", ItemType: &StringType{}, Editable: false},
{Name: "database_type", Scope: SystemScope, Group: BasicGroup, EnvKey: "DATABASE_TYPE", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: common.ClairDB, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: common.ClairDBHost, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_HOST", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: common.ClairDBPassword, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_PASSWORD", DefaultValue: "root123", ItemType: &PasswordType{}, Editable: false},
{Name: common.ClairDBPort, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: common.ClairDBSSLMode, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: common.ClairDBUsername, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_DB_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: common.ClairURL, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_URL", DefaultValue: "http://clair:6060", ItemType: &StringType{}, Editable: false},
{Name: "email_from", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_FROM", DefaultValue: "admin <sample_admin@mydomain.com>", ItemType: &StringType{}, Editable: false},
{Name: "email_host", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_HOST", DefaultValue: "smtp.mydomain.com", ItemType: &StringType{}, Editable: false},
{Name: "email_identity", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_IDENTITY", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "email_insecure", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_INSECURE", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: "email_password", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_PWD", DefaultValue: "", ItemType: &PasswordType{}, Editable: false},
{Name: "email_port", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_PORT", DefaultValue: "25", ItemType: &PortType{}, Editable: false},
{Name: "email_ssl", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_SSL", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: "email_username", Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_USR", DefaultValue: "sample_admin@mydomain.com", ItemType: &StringType{}, Editable: false},
{Name: common.CoreURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_URL", DefaultValue: "http://core:8080", ItemType: &StringType{}, Editable: false},
{Name: common.DatabaseType, Scope: SystemScope, Group: BasicGroup, EnvKey: "DATABASE_TYPE", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: "ext_endpoint", Scope: SystemScope, Group: BasicGroup, EnvKey: "EXT_ENDPOINT", DefaultValue: "https://host01.com", ItemType: &StringType{}, Editable: false},
{Name: "jobservice_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "JOBSERVICE_URL", DefaultValue: "http://jobservice:8080", ItemType: &StringType{}, Editable: false},
{Name: common.EmailFrom, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_FROM", DefaultValue: "admin <sample_admin@mydomain.com>", ItemType: &StringType{}, Editable: false},
{Name: common.EmailHost, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_HOST", DefaultValue: "smtp.mydomain.com", ItemType: &StringType{}, Editable: false},
{Name: common.EmailIdentity, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_IDENTITY", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.EmailInsecure, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_INSECURE", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: common.EmailPassword, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_PWD", DefaultValue: "", ItemType: &PasswordType{}, Editable: false},
{Name: common.EmailPort, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_PORT", DefaultValue: "25", ItemType: &PortType{}, Editable: false},
{Name: common.EmailSSL, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_SSL", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: common.EmailUsername, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_USR", DefaultValue: "sample_admin@mydomain.com", ItemType: &StringType{}, Editable: false},
{Name: "ldap_base_dn", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_BASE_DN", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: "ldap_filter", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "ldap_group_base_dn", Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_BASE_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "ldap_group_admin_dn", Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "ldap_group_attribute_name", Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_GID", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "ldap_group_search_filter", Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "ldap_group_search_scope", Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false},
{Name: "ldap_scope", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false},
{Name: "ldap_search_dn", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_SEARCH_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "ldap_search_password", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_SEARCH_PWD", DefaultValue: "", ItemType: &PasswordType{}, Editable: false},
{Name: "ldap_timeout", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_TIMEOUT", DefaultValue: "5", ItemType: &IntType{}, Editable: false},
{Name: "ldap_uid", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_UID", DefaultValue: "cn", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: "ldap_url", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_URL", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: "ldap_verify_cert", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_VERIFY_CERT", DefaultValue: "true", ItemType: &BoolType{}, Editable: false},
{Name: common.ExtEndpoint, Scope: SystemScope, Group: BasicGroup, EnvKey: "EXT_ENDPOINT", DefaultValue: "https://host01.com", ItemType: &StringType{}, Editable: false},
{Name: common.JobServiceURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "JOBSERVICE_URL", DefaultValue: "http://jobservice:8080", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPBaseDN, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_BASE_DN", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: common.LDAPFilter, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupBaseDN, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_BASE_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LdapGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupAttributeName, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_GID", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupSearchFilter, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupSearchScope, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false},
{Name: common.LDAPScope, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false},
{Name: common.LDAPSearchDN, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_SEARCH_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPSearchPwd, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_SEARCH_PWD", DefaultValue: "", ItemType: &PasswordType{}, Editable: false},
{Name: common.LDAPTimeout, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_TIMEOUT", DefaultValue: "5", ItemType: &IntType{}, Editable: false},
{Name: common.LDAPUID, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_UID", DefaultValue: "cn", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: common.LDAPURL, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_URL", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: common.LDAPVerifyCert, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_VERIFY_CERT", DefaultValue: "true", ItemType: &BoolType{}, Editable: false},
{Name: common.LDAPGroupMembershipAttribute, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_GROUP_MEMBERSHIP_ATTRIBUTE", DefaultValue: "memberof", ItemType: &StringType{}, Editable: true},
{Name: "max_job_workers", Scope: SystemScope, Group: BasicGroup, EnvKey: "MAX_JOB_WORKERS", DefaultValue: "10", ItemType: &IntType{}, Editable: false},
{Name: "notary_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "NOTARY_URL", DefaultValue: "http://notary-server:4443", ItemType: &StringType{}, Editable: false},
{Name: "scan_all_policy", Scope: UserScope, Group: BasicGroup, EnvKey: "", DefaultValue: "", ItemType: &MapType{}, Editable: false},
{Name: common.MaxJobWorkers, Scope: SystemScope, Group: BasicGroup, EnvKey: "MAX_JOB_WORKERS", DefaultValue: "10", ItemType: &IntType{}, Editable: false},
{Name: common.NotaryURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "NOTARY_URL", DefaultValue: "http://notary-server:4443", ItemType: &StringType{}, Editable: false},
{Name: common.ScanAllPolicy, Scope: UserScope, Group: BasicGroup, EnvKey: "", DefaultValue: "", ItemType: &MapType{}, Editable: false},
{Name: "postgresql_database", Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_DATABASE", DefaultValue: "registry", ItemType: &StringType{}, Editable: false},
{Name: "postgresql_host", Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_HOST", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: "postgresql_password", Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PASSWORD", DefaultValue: "root123", ItemType: &PasswordType{}, Editable: false},
{Name: "postgresql_port", Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: "postgresql_sslmode", Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: "postgresql_username", Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLDatabase, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_DATABASE", DefaultValue: "registry", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLHOST, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_HOST", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLPassword, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PASSWORD", DefaultValue: "root123", ItemType: &PasswordType{}, Editable: false},
{Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
{Name: "project_creation_restriction", Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false},
{Name: "read_only", Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false},
{Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: "registry_storage_provider_name", Scope: SystemScope, Group: BasicGroup, EnvKey: "REGISTRY_STORAGE_PROVIDER_NAME", DefaultValue: "filesystem", ItemType: &StringType{}, Editable: false},
{Name: "registry_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "REGISTRY_URL", DefaultValue: "http://registry:5000", ItemType: &StringType{}, Editable: false},
{Name: "registry_controller_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "REGISTRY_CONTROLLER_URL", DefaultValue: "http://registryctl:8080", ItemType: &StringType{}, Editable: false},
{Name: "self_registration", Scope: UserScope, Group: BasicGroup, EnvKey: "SELF_REGISTRATION", DefaultValue: "true", ItemType: &BoolType{}, Editable: false},
{Name: "token_expiration", Scope: UserScope, Group: BasicGroup, EnvKey: "TOKEN_EXPIRATION", DefaultValue: "30", ItemType: &IntType{}, Editable: false},
{Name: "token_service_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "TOKEN_SERVICE_URL", DefaultValue: "http://core:8080/service/token", ItemType: &StringType{}, Editable: false},
{Name: common.RegistryStorageProviderName, Scope: SystemScope, Group: BasicGroup, EnvKey: "REGISTRY_STORAGE_PROVIDER_NAME", DefaultValue: "filesystem", ItemType: &StringType{}, Editable: false},
{Name: common.RegistryURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "REGISTRY_URL", DefaultValue: "http://registry:5000", ItemType: &StringType{}, Editable: false},
{Name: common.RegistryControllerURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "REGISTRY_CONTROLLER_URL", DefaultValue: "http://registryctl:8080", ItemType: &StringType{}, Editable: false},
{Name: common.SelfRegistration, Scope: UserScope, Group: BasicGroup, EnvKey: "SELF_REGISTRATION", DefaultValue: "true", ItemType: &BoolType{}, Editable: false},
{Name: common.TokenExpiration, Scope: UserScope, Group: BasicGroup, EnvKey: "TOKEN_EXPIRATION", DefaultValue: "30", ItemType: &IntType{}, Editable: false},
{Name: common.TokenServiceURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "TOKEN_SERVICE_URL", DefaultValue: "http://core:8080/service/token", ItemType: &StringType{}, Editable: false},
{Name: "uaa_client_id", Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_CLIENTID", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "uaa_client_secret", Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_CLIENTSECRET", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "uaa_endpoint", Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_ENDPOINT", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: "uaa_verify_cert", Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_VERIFY_CERT", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: common.UAAClientID, Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_CLIENTID", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.UAAClientSecret, Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_CLIENTSECRET", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.UAAEndpoint, Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_ENDPOINT", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.UAAVerifyCert, Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_VERIFY_CERT", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
{Name: common.HTTPAuthProxyEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
{Name: common.HTTPAuthProxyTokenReviewEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
@ -143,10 +143,10 @@ var (
{Name: common.OIDCScope, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
{Name: common.OIDCVerifyCert, Scope: UserScope, Group: OIDCGroup, DefaultValue: "true", ItemType: &BoolType{}},
{Name: "with_chartmuseum", Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CHARTMUSEUM", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
{Name: "with_clair", Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CLAIR", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
{Name: "with_notary", Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
{Name: common.WithChartMuseum, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CHARTMUSEUM", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
{Name: common.WithClair, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CLAIR", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
{Name: common.WithNotary, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
// the unit of expiration is minute, 43200 minutes = 30 days
{Name: "robot_token_duration", Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
{Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
}
)

View File

@ -113,6 +113,7 @@ const (
CfgDriverDB = "db"
NewHarborAdminName = "admin@harbor.local"
RegistryStorageProviderName = "registry_storage_provider_name"
RegistryControllerURL = "registry_controller_url"
UserMember = "u"
GroupMember = "g"
ReadOnly = "read_only"

View File

@ -3,6 +3,7 @@ package job
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
@ -10,6 +11,7 @@ import (
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/http/modifier/auth"
"github.com/goharbor/harbor/src/common/job/models"
"github.com/goharbor/harbor/src/jobservice/job"
)
// Client wraps the interface to access the jobservice.
@ -17,6 +19,7 @@ type Client interface {
SubmitJob(*models.JobData) (string, error)
GetJobLog(uuid string) ([]byte, error)
PostAction(uuid, action string) error
GetExecutions(uuid string) ([]job.Stats, error)
// TODO: Redirect the job log when we see there's a memory issue.
}
@ -103,6 +106,36 @@ func (d *DefaultClient) GetJobLog(uuid string) ([]byte, error) {
return data, nil
}
// GetExecutions ...
func (d *DefaultClient) GetExecutions(periodicJobID string) ([]job.Stats, error) {
url := fmt.Sprintf("%s/api/v1/jobs/%s/executions", d.endpoint, periodicJobID)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := d.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, &commonhttp.Error{
Code: resp.StatusCode,
Message: string(data),
}
}
var exes []job.Stats
err = json.Unmarshal(data, &exes)
if err != nil {
return nil, err
}
return exes, nil
}
// PostAction calls jobservice's API to perform an action on the job specified by uuid
func (d *DefaultClient) PostAction(uuid, action string) error {
url := d.endpoint + "/api/v1/jobs/" + uuid

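A hedged usage sketch for the new GetExecutions method: it assumes the Client interface above lives in the src/common/job package and that a concrete client is wired up elsewhere (for example by the core utils package); only the interface itself comes from the diff.

package main

import (
	"fmt"
	"log"

	jobcli "github.com/goharbor/harbor/src/common/job" // assumed package path for the Client interface
)

// printLatestExecution looks up the most recent execution of a periodic job.
func printLatestExecution(c jobcli.Client, periodicJobID string) {
	exes, err := c.GetExecutions(periodicJobID)
	if err != nil {
		log.Fatal(err)
	}
	if len(exes) == 0 {
		fmt.Println("no executions yet")
		return
	}
	fmt.Println("latest execution:", exes[0].Info.JobID, exes[0].Info.Status)
}

func main() {}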
View File

@ -47,6 +47,14 @@ func TestGetJobLog(t *testing.T) {
assert.Contains(text, "The content in this file is for mocking the get log api.")
}
func TestGetExecutions(t *testing.T) {
assert := assert.New(t)
exes, err := testClient.GetExecutions(ID)
assert.Nil(err)
stat := exes[0]
assert.Equal(ID+"@123123", stat.Info.JobID)
}
func TestPostAction(t *testing.T) {
assert := assert.New(t)
err := testClient.PostAction(ID, "fff")

View File

@ -7,10 +7,6 @@ const (
ImageScanAllJob = "IMAGE_SCAN_ALL"
// ImageGC the name of image garbage collection job in job service
ImageGC = "IMAGE_GC"
// Replication : the name of the replication job in job service
Replication = "REPLICATION"
// ReplicationScheduler : the name of the replication scheduler job in job service
ReplicationScheduler = "IMAGE_REPLICATE"
// JobKindGeneric : Kind of generic job
JobKindGeneric = "Generic"

View File

@ -28,25 +28,28 @@ type JobMetadata struct {
// JobStats keeps the result of job launching.
type JobStats struct {
Stats *JobStatData `json:"job"`
Stats *StatsInfo `json:"job"`
}
// JobStatData keeps the stats of job
type JobStatData struct {
JobID string `json:"id"`
Status string `json:"status"`
JobName string `json:"name"`
JobKind string `json:"kind"`
IsUnique bool `json:"unique"`
RefLink string `json:"ref_link,omitempty"`
CronSpec string `json:"cron_spec,omitempty"`
EnqueueTime int64 `json:"enqueue_time"`
UpdateTime int64 `json:"update_time"`
RunAt int64 `json:"run_at,omitempty"`
CheckIn string `json:"check_in,omitempty"`
CheckInAt int64 `json:"check_in_at,omitempty"`
DieAt int64 `json:"die_at,omitempty"`
HookStatus string `json:"hook_status,omitempty"`
// StatsInfo keeps the stats of job
type StatsInfo struct {
JobID string `json:"id"`
Status string `json:"status"`
JobName string `json:"name"`
JobKind string `json:"kind"`
IsUnique bool `json:"unique"`
RefLink string `json:"ref_link,omitempty"`
CronSpec string `json:"cron_spec,omitempty"`
EnqueueTime int64 `json:"enqueue_time"`
UpdateTime int64 `json:"update_time"`
RunAt int64 `json:"run_at,omitempty"`
CheckIn string `json:"check_in,omitempty"`
CheckInAt int64 `json:"check_in_at,omitempty"`
DieAt int64 `json:"die_at,omitempty"`
WebHookURL string `json:"web_hook_url,omitempty"`
UpstreamJobID string `json:"upstream_job_id,omitempty"` // Ref the upstream job if existing
NumericPID int64 `json:"numeric_policy_id,omitempty"` // The numeric policy ID of the periodic job
Parameters Parameters `json:"parameters,omitempty"`
}
// JobPoolStats represents the health and status of all the running worker pools.
@ -54,7 +57,7 @@ type JobPoolStats struct {
Pools []*JobPoolStatsData `json:"worker_pools"`
}
// JobPoolStatsData represents the health and status of the worker pool.
type JobPoolStatsData struct {
WorkerPoolID string `json:"worker_pool_id"`
StartedAt int64 `json:"started_at"`
@ -71,9 +74,10 @@ type JobActionRequest struct {
// JobStatusChange is designed for reporting the status change via hook.
type JobStatusChange struct {
JobID string `json:"job_id"`
Status string `json:"status"`
CheckIn string `json:"check_in,omitempty"`
JobID string `json:"job_id"`
Status string `json:"status"`
CheckIn string `json:"check_in,omitempty"`
Metadata *StatsInfo `json:"metadata,omitempty"`
}
// Message is designed for sub/pub messages

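For reference, a small sketch of the webhook payload shape implied by the JSON tags above, trimmed to the fields the adminjob hook handler later relies on (the concrete values are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the structs above; tags match the ones in the diff.
type StatsInfo struct {
	JobID         string `json:"id"`
	Status        string `json:"status"`
	UpstreamJobID string `json:"upstream_job_id,omitempty"`
}

type JobStatusChange struct {
	JobID    string     `json:"job_id"`
	Status   string     `json:"status"`
	CheckIn  string     `json:"check_in,omitempty"`
	Metadata *StatsInfo `json:"metadata,omitempty"`
}

func main() {
	payload := `{"job_id":"abc@456","status":"Running","metadata":{"id":"abc@456","status":"Running","upstream_job_id":"periodic-1"}}`
	var change JobStatusChange
	if err := json.Unmarshal([]byte(payload), &change); err != nil {
		panic(err)
	}
	// The handler prefers the upstream (periodic) job ID when it is present.
	fmt.Println(change.Metadata.UpstreamJobID) // periodic-1
}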
View File

@ -12,6 +12,8 @@ import (
"time"
"github.com/goharbor/harbor/src/common/job/models"
"github.com/goharbor/harbor/src/jobservice/job"
)
const (
@ -45,6 +47,29 @@ func NewJobServiceServer() *httptest.Server {
panic(err)
}
})
mux.HandleFunc(fmt.Sprintf("%s/%s/executions", jobsPrefix, jobUUID),
func(rw http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodGet {
rw.WriteHeader(http.StatusMethodNotAllowed)
return
}
var stats []job.Stats
stat := job.Stats{
Info: &job.StatsInfo{
JobID: jobUUID + "@123123",
Status: "Pending",
RunAt: time.Now().Unix(),
IsUnique: false,
},
}
stats = append(stats, stat)
b, _ := json.Marshal(stats)
rw.WriteHeader(http.StatusOK)
if _, err := rw.Write(b); err != nil {
panic(err)
}
return
})
mux.HandleFunc(fmt.Sprintf("%s/%s", jobsPrefix, jobUUID),
func(rw http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodPost {
@ -77,7 +102,7 @@ func NewJobServiceServer() *httptest.Server {
json.Unmarshal(data, &jobReq)
if jobReq.Job.Name == "replication" {
respData := models.JobStats{
Stats: &models.JobStatData{
Stats: &models.StatsInfo{
JobID: jobUUID,
Status: "Pending",
RunAt: time.Now().Unix(),

View File

@ -15,8 +15,6 @@
package secret
const (
// AdminserverUser is the name of adminserver user
AdminserverUser = "harbor-adminserver"
// JobserviceUser is the name of jobservice user
JobserviceUser = "harbor-jobservice"
// CoreUser is the name of ui user

View File

@ -13,7 +13,7 @@ import (
goldap "gopkg.in/ldap.v2"
)
var adminServerLdapTestConfig = map[string]interface{}{
var ldapTestConfig = map[string]interface{}{
common.ExtEndpoint: "host01.com",
common.AUTHMode: "ldap_auth",
common.DatabaseType: "postgresql",
@ -35,7 +35,7 @@ var adminServerLdapTestConfig = map[string]interface{}{
common.AdminInitialPassword: "password",
}
var adminServerDefaultConfigWithVerifyCert = map[string]interface{}{
var defaultConfigWithVerifyCert = map[string]interface{}{
common.ExtEndpoint: "https://host01.com",
common.AUTHMode: common.LDAPAuth,
common.DatabaseType: "postgresql",
@ -91,7 +91,7 @@ func TestMain(m *testing.M) {
log.Fatalf("failed to initialize configurations: %v", err)
}
uiConfig.Upload(adminServerLdapTestConfig)
uiConfig.Upload(ldapTestConfig)
os.Exit(m.Run())
@ -270,11 +270,11 @@ func TestSession_SearchGroup(t *testing.T) {
}
ldapConfig := models.LdapConf{
LdapURL: adminServerLdapTestConfig[common.LDAPURL].(string) + ":389",
LdapSearchDn: adminServerLdapTestConfig[common.LDAPSearchDN].(string),
LdapURL: ldapTestConfig[common.LDAPURL].(string) + ":389",
LdapSearchDn: ldapTestConfig[common.LDAPSearchDN].(string),
LdapScope: 2,
LdapSearchPassword: adminServerLdapTestConfig[common.LDAPSearchPwd].(string),
LdapBaseDn: adminServerLdapTestConfig[common.LDAPBaseDN].(string),
LdapSearchPassword: ldapTestConfig[common.LDAPSearchPwd].(string),
LdapBaseDn: ldapTestConfig[common.LDAPBaseDN].(string),
}
tests := []struct {
@ -311,11 +311,11 @@ func TestSession_SearchGroup(t *testing.T) {
func TestSession_SearchGroupByDN(t *testing.T) {
ldapConfig := models.LdapConf{
LdapURL: adminServerLdapTestConfig[common.LDAPURL].(string) + ":389",
LdapSearchDn: adminServerLdapTestConfig[common.LDAPSearchDN].(string),
LdapURL: ldapTestConfig[common.LDAPURL].(string) + ":389",
LdapSearchDn: ldapTestConfig[common.LDAPSearchDN].(string),
LdapScope: 2,
LdapSearchPassword: adminServerLdapTestConfig[common.LDAPSearchPwd].(string),
LdapBaseDn: adminServerLdapTestConfig[common.LDAPBaseDN].(string),
LdapSearchPassword: ldapTestConfig[common.LDAPSearchPwd].(string),
LdapBaseDn: ldapTestConfig[common.LDAPBaseDN].(string),
}
ldapGroupConfig := models.LdapGroupConf{
LdapGroupBaseDN: "ou=group,dc=example,dc=com",

View File

@ -23,16 +23,16 @@ import (
"strings"
"github.com/docker/distribution/registry/auth/token"
"github.com/docker/notary"
"github.com/docker/notary/client"
"github.com/docker/notary/trustpinning"
"github.com/docker/notary/tuf/data"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/common/utils/registry"
"github.com/goharbor/harbor/src/core/config"
tokenutil "github.com/goharbor/harbor/src/core/service/token"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf/data"
"github.com/opencontainers/go-digest"
digest "github.com/opencontainers/go-digest"
)
var (
@ -91,7 +91,7 @@ func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target
}
tr := registry.NewTransport(registry.GetHTTPTransport(), authorizer)
gun := data.GUN(fqRepo)
notaryRepo, err := client.NewFileCachedNotaryRepository(notaryCachePath, gun, notaryEndpoint, tr, mockRetriever, trustPin)
notaryRepo, err := client.NewFileCachedRepository(notaryCachePath, gun, notaryEndpoint, tr, mockRetriever, trustPin)
if err != nil {
return res, err
}

View File

@ -148,8 +148,8 @@ func AuthCodeURL(state string) (string, error) {
log.Errorf("Failed to get OAuth configuration, error: %v", err)
return "", err
}
if strings.HasPrefix(conf.Endpoint.AuthURL, googleEndpoint) {
return conf.AuthCodeURL(state, oauth2.AccessTypeOffline), nil
if strings.HasPrefix(conf.Endpoint.AuthURL, googleEndpoint) { // make sure the refresh token will be returned
return conf.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "consent")), nil
}
return conf.AuthCodeURL(state), nil
}

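For context on the AuthCodeURL change: with golang.org/x/oauth2, AccessTypeOffline requests a refresh token, and Google additionally needs prompt=consent to re-issue one for a client that was already authorized, which is what SetAuthURLParam adds. A minimal sketch with placeholder client credentials and endpoint URLs:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		RedirectURL:  "https://harbor.example.com/c/oidc/callback", // placeholder
		Scopes:       []string{"openid"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://accounts.google.com/o/oauth2/v2/auth", // placeholder
			TokenURL: "https://oauth2.googleapis.com/token",          // placeholder
		},
	}
	url := conf.AuthCodeURL("some-state", oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "consent"))
	fmt.Println(url)
}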
View File

@ -102,6 +102,7 @@ func (dm *defaultManager) VerifyToken(ctx context.Context, user *models.OIDCUser
if err != nil {
return verifyError(err)
}
log.Debugf("Token string for verify: %s", tokenStr)
_, err = VerifyToken(ctx, token.IDToken)
if err == nil {
return nil

View File

@ -38,12 +38,7 @@ func NewBasicAuthCredential(username, password string) Credential {
}
func (b *basicAuthCredential) AddAuthorization(req *http.Request) {
// only add the authentication info when the username isn't empty
// the logic is needed for requesting resources from docker hub's
// public repositories
if len(b.username) > 0 {
req.SetBasicAuth(b.username, b.password)
}
req.SetBasicAuth(b.username, b.password)
}
// implement github.com/goharbor/harbor/src/common/http/modifier.Modifier

View File

@ -17,7 +17,6 @@ package auth
import (
"regexp"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/goharbor/harbor/src/common/utils/log"
)
@ -26,8 +25,8 @@ var (
base = regexp.MustCompile("/v2")
catalog = regexp.MustCompile("/v2/_catalog")
tag = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/tags/list")
manifest = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/manifests/(" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + ")")
blob = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/" + digest.DigestRegexp.String())
manifest = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/manifests/(" + reference.TagRegexp.String() + "|" + reference.DigestRegexp.String() + ")")
blob = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/" + reference.DigestRegexp.String())
blobUpload = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/uploads")
blobUploadChunk = regexp.MustCompile("/v2/(" + reference.NameRegexp.String() + ")/blobs/uploads/[a-zA-Z0-9-_.=]+")

View File

@ -31,8 +31,8 @@ func TestParseRepository(t *testing.T) {
{"/v2/tags/list", ""},
{"/v2/tags/list/tags/list", "tags/list"},
{"/v2/library/manifests/latest", "library"},
{"/v2/library/manifests/sha256:1234567890", "library"},
{"/v2/library/blobs/sha256:1234567890", "library"},
{"/v2/library/manifests/sha256:eec76eedea59f7bf39a2713bfd995c82cfaa97724ee5b7f5aba253e07423d0ae", "library"},
{"/v2/library/blobs/sha256:eec76eedea59f7bf39a2713bfd995c82cfaa97724ee5b7f5aba253e07423d0ae", "library"},
{"/v2/library/blobs/uploads", "library"},
{"/v2/library/blobs/uploads/1234567890", "library"},
}

View File

@ -193,7 +193,7 @@ func parseScopes(req *http.Request) ([]*token.ResourceActions, error) {
// base
scope = nil
} else {
// unknow
// unknown
return scopes, fmt.Errorf("can not parse scope from the request: %s %s", req.Method, req.URL.Path)
}
@ -205,7 +205,7 @@ func parseScopes(req *http.Request) ([]*token.ResourceActions, error) {
for _, s := range scopes {
strs = append(strs, scopeString(s))
}
log.Debugf("scopses parsed from request: %s", strings.Join(strs, " "))
log.Debugf("scopes parsed from request: %s", strings.Join(strs, " "))
return scopes, nil
}

View File

@ -192,7 +192,7 @@ func (dc *defaultClient) UpdateConfig(cfg *ClientConfig) error {
pool := x509.NewCertPool()
// Do not return an error if the certificate is malformed, so we can put a placeholder.
if ok := pool.AppendCertsFromPEM(content); !ok {
log.Warningf("Failed to append certificate to cert pool, cert path: %s", cfg.CARootPath)
} else {
tc.RootCAs = pool
}

View File

@ -169,8 +169,22 @@ func (aj *AJAPI) getLog(id int64) {
aj.SendNotFoundError(errors.New("Failed to get Job"))
return
}
jobID := job.UUID
// For periodic jobs, get the latest execution job ID and use it to query the job log.
if job.Kind == common_job.JobKindPeriodic {
exes, err := utils_core.GetJobServiceClient().GetExecutions(job.UUID)
if err != nil {
aj.SendInternalServerError(err)
return
}
if len(exes) == 0 {
aj.SendNotFoundError(errors.New("no execution log"))
return
}
jobID = exes[0].Info.JobID
}
logBytes, err := utils_core.GetJobServiceClient().GetJobLog(job.UUID)
logBytes, err := utils_core.GetJobServiceClient().GetJobLog(jobID)
if err != nil {
if httpErr, ok := err.(*common_http.Error); ok {
aj.RenderError(httpErr.Code, "")

View File

@ -121,7 +121,7 @@ func (c *ConfigAPI) Put() {
}
func (c *ConfigAPI) validateCfg(cfgs map[string]interface{}) (bool, error) {
mode := c.cfgManager.Get("auth_mode").GetString()
mode := c.cfgManager.Get(common.AUTHMode).GetString()
if value, ok := cfgs[common.AUTHMode]; ok {
flag, err := authModeCanBeModified()
if err != nil {

View File

@ -26,7 +26,7 @@ import (
"strings"
)
var adminServerTestConfig = map[string]interface{}{
var testConfig = map[string]interface{}{
common.DefaultCoreEndpoint: "test",
}
@ -34,7 +34,7 @@ func TestMain(m *testing.M) {
test.InitDatabaseFromEnv()
config.Init()
config.Upload(adminServerTestConfig)
config.Upload(testConfig)
os.Exit(m.Run())
}

View File

@ -87,6 +87,9 @@ func (gc *GCAPI) Put() {
return
}
ajr.Name = common_job.ImageGC
ajr.Parameters = map[string]interface{}{
"redis_url_reg": os.Getenv("_REDIS_URL_REG"),
}
gc.updateSchedule(ajr)
}

View File

@ -20,6 +20,7 @@ import (
"net/http"
"strconv"
common_http "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/replication/dao/models"
"github.com/goharbor/harbor/src/replication/event"
@ -264,6 +265,12 @@ func (r *ReplicationOperationAPI) GetTaskLog() {
logBytes, err := replication.OperationCtl.GetTaskLog(taskID)
if err != nil {
if httpErr, ok := err.(*common_http.Error); ok {
if httpErr.Code == http.StatusNotFound {
r.SendNotFoundError(fmt.Errorf("the log of task %d not found", taskID))
return
}
}
r.SendInternalServerError(fmt.Errorf("failed to get log of task %d: %v", taskID, err))
return
}

View File

@ -102,6 +102,12 @@ func (ua *UserAPI) Prepare() {
return
}
if user == nil {
log.Errorf("User with username %s does not exist in DB.", ua.SecurityCtx.GetUsername())
ua.SendInternalServerError(fmt.Errorf("user %s does not exist in DB", ua.SecurityCtx.GetUsername()))
return
}
ua.currentUserID = user.UserID
id := ua.Ctx.Input.Param(":id")
if id == "current" {

View File

@ -30,9 +30,16 @@ import (
"time"
)
const refreshDuration = 5 * time.Second
const refreshDuration = 2 * time.Second
const userEntryComment = "By Authproxy"
var secureTransport = &http.Transport{}
var insecureTransport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
// Auth implements the HTTP authenticator with the required attributes.
// The attribute Endpoint is the HTTP endpoint to which the POST request should be issued for authentication
type Auth struct {
@ -125,6 +132,9 @@ func (a *Auth) fillInModel(u *models.User) error {
func (a *Auth) ensure() error {
a.Lock()
defer a.Unlock()
if a.client == nil {
a.client = &http.Client{}
}
if time.Now().Sub(a.settingTimeStamp) >= refreshDuration {
setting, err := config.HTTPAuthProxySetting()
if err != nil {
@ -134,16 +144,12 @@ func (a *Auth) ensure() error {
a.SkipCertVerify = !setting.VerifyCert
a.AlwaysOnboard = setting.AlwaysOnBoard
}
if a.client == nil {
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: a.SkipCertVerify,
},
}
a.client = &http.Client{
Transport: tr,
}
if a.SkipCertVerify {
a.client.Transport = insecureTransport
} else {
a.client.Transport = secureTransport
}
return nil
}

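The ensure() change above stops rebuilding a TLS transport on every settings refresh and instead switches the client between two package-level transports. A standalone sketch of that selection logic using only the standard library:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

var secureTransport = &http.Transport{}
var insecureTransport = &http.Transport{
	// Trust any server certificate, e.g. an auth proxy with a self-signed cert.
	TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}

func configureClient(c *http.Client, skipCertVerify bool) {
	if skipCertVerify {
		c.Transport = insecureTransport
	} else {
		c.Transport = secureTransport
	}
}

func main() {
	c := &http.Client{}
	configureClient(c, true)
	fmt.Println(c.Transport == insecureTransport) // true
}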
View File

@ -15,11 +15,13 @@
package authproxy
import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
cut "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/auth"
"github.com/goharbor/harbor/src/core/auth/authproxy/test"
"github.com/goharbor/harbor/src/core/config"
"github.com/stretchr/testify/assert"
"net/http/httptest"
"os"
@ -45,6 +47,13 @@ func TestMain(m *testing.M) {
// So it won't require mocking the cfgManager
settingTimeStamp: time.Now(),
}
conf := map[string]interface{}{
common.HTTPAuthProxyEndpoint: "dummy",
common.HTTPAuthProxyTokenReviewEndpoint: "dummy",
common.HTTPAuthProxyVerifyCert: "false",
}
config.InitWithSettings(conf)
rc := m.Run()
if err := dao.ClearHTTPAuthProxyUsers(); err != nil {
panic(err)

View File

@ -29,7 +29,7 @@ import (
coreConfig "github.com/goharbor/harbor/src/core/config"
)
var adminServerTestConfig = map[string]interface{}{
var testConfig = map[string]interface{}{
common.ExtEndpoint: "host01.com",
common.AUTHMode: "db_auth",
common.DatabaseType: "postgresql",
@ -68,7 +68,7 @@ func TestMain(m *testing.M) {
log.Fatalf("failed to initialize configurations: %v", err)
}
config.Upload(adminServerTestConfig)
config.Upload(testConfig)
retCode := m.Run()
os.Exit(retCode)
}

View File

@ -116,7 +116,7 @@ func initProjectManager() error {
}
pool := x509.NewCertPool()
if ok := pool.AppendCertsFromPEM(content); !ok {
return fmt.Errorf("failed to append cert content into cert pool")
}
AdmiralClient = &http.Client{
Transport: &http.Transport{

View File

@ -33,6 +33,7 @@ import (
const tokenKey = "oidc_token"
const stateKey = "oidc_state"
const userInfoKey = "oidc_user_info"
const oidcUserComment = "Onboarded via OIDC provider"
// OIDCController handles requests for OIDC login, callback and user onboard
type OIDCController struct {
@ -67,6 +68,7 @@ func (oc *OIDCController) RedirectLogin() {
return
}
oc.SetSession(stateKey, state)
log.Debugf("State dumped to session: %s", state)
// Force to use the func 'Redirect' of beego.Controller
oc.Controller.Redirect(url, http.StatusFound)
}
@ -75,6 +77,8 @@ func (oc *OIDCController) RedirectLogin() {
// kick off onboard if needed.
func (oc *OIDCController) Callback() {
if oc.Ctx.Request.URL.Query().Get("state") != oc.GetSession(stateKey) {
log.Errorf("State mismatch, in session: %s, in url: %s", oc.GetSession(stateKey),
oc.Ctx.Request.URL.Query().Get("state"))
oc.SendBadRequestError(errors.New("State mismatch"))
return
}
@ -106,21 +110,34 @@ func (oc *OIDCController) Callback() {
oc.SendInternalServerError(err)
return
}
tokenBytes, err := json.Marshal(token)
if err != nil {
oc.SendInternalServerError(err)
return
}
log.Debugf("Exchanged token string: %s", string(tokenBytes))
oc.SetSession(tokenKey, tokenBytes)
if u == nil {
oc.SetSession(userInfoKey, string(ouDataStr))
oc.Controller.Redirect("/oidc-onboard", http.StatusFound)
oc.Controller.Redirect(fmt.Sprintf("/oidc-onboard?username=%s", strings.Replace(d.Username, " ", "_", -1)),
http.StatusFound)
} else {
oidcUser, err := dao.GetOIDCUserByUserID(u.UserID)
if err != nil {
oc.SendInternalServerError(err)
return
}
_, t, err := secretAndToken(tokenBytes)
if err != nil {
oc.SendInternalServerError(err)
return
}
oidcUser.Token = t
if err := dao.UpdateOIDCUser(oidcUser); err != nil {
oc.SendInternalServerError(err)
return
}
oc.SetSession(userKey, *u)
oc.Controller.Redirect("/", http.StatusFound)
}
}
// Onboard handles the request to onboard a user authenticated via OIDC provider
@ -170,18 +187,19 @@ func (oc *OIDCController) Onboard() {
email := d.Email
if email == "" {
email = utils.GenerateRandomString() + "@harbor.com"
email = utils.GenerateRandomString() + "@placeholder.com"
}
user := models.User{
Username: username,
Email: email,
OIDCUserMeta: &oidcUser,
Comment: oidcUserComment,
}
err = dao.OnBoardOIDCUser(&user)
if err != nil {
if strings.Contains(err.Error(), dao.ErrDupUser.Error()) {
oc.RenderError(http.StatusConflict, "Duplicate username")
oc.RenderError(http.StatusConflict, "Conflict in username, a user with the same username has already been onboarded.")
return
}
oc.SendInternalServerError(err)

View File

@ -38,9 +38,10 @@ var statusMap = map[string]string{
// Handler handles requests on /service/notifications/jobs/adminjob/*, which listens to the webhook of jobservice.
type Handler struct {
api.BaseController
id int64
UUID string
status string
id int64
UUID string
status string
UpstreamJobID string
}
// Prepare ...
@ -60,7 +61,13 @@ func (h *Handler) Prepare() {
return
}
h.id = id
h.UUID = data.JobID
// UpstreamJobID is the periodic job id
if data.Metadata != nil && data.Metadata.UpstreamJobID != "" {
h.UUID = data.Metadata.UpstreamJobID
} else {
h.UUID = data.JobID
}
status, ok := statusMap[data.Status]
if !ok {
log.Infof("drop the job status update event: job id-%d, status-%s", h.id, status)

View File

@ -20,8 +20,8 @@ import (
"net/http"
"strings"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/utils"
)
const (

View File

@ -16,7 +16,6 @@ package api
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
@ -24,13 +23,19 @@ import (
"github.com/gorilla/mux"
"fmt"
"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/core"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/jobservice/models"
"github.com/goharbor/harbor/src/jobservice/opm"
"github.com/pkg/errors"
"strconv"
)
const totalHeaderKey = "Total-Count"
// Handler defines approaches to handle the http requests.
type Handler interface {
// HandleLaunchJobReq is used to handle the job submission request.
@ -47,6 +52,12 @@ type Handler interface {
// HandleJobLogReq is used to handle the request of getting job logs
HandleJobLogReq(w http.ResponseWriter, req *http.Request)
// HandlePeriodicExecutions is used to handle the request of getting periodic executions
HandlePeriodicExecutions(w http.ResponseWriter, req *http.Request)
// HandleScheduledJobs is used to handle the request of getting pending scheduled jobs
HandleScheduledJobs(w http.ResponseWriter, req *http.Request)
}
// DefaultHandler is the default request handler which implements the Handler interface.
@ -63,10 +74,6 @@ func NewDefaultHandler(ctl core.Interface) *DefaultHandler {
// HandleLaunchJobReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w, req) {
return
}
data, err := ioutil.ReadAll(req.Body)
if err != nil {
dh.handleError(w, req, http.StatusInternalServerError, errs.ReadRequestBodyError(err))
@ -74,8 +81,8 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
}
// unmarshal data
jobReq := models.JobRequest{}
if err = json.Unmarshal(data, &jobReq); err != nil {
jobReq := &job.Request{}
if err = json.Unmarshal(data, jobReq); err != nil {
dh.handleError(w, req, http.StatusInternalServerError, errs.HandleJSONDataError(err))
return
}
@ -83,13 +90,19 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
// Pass request to the controller for the follow-up.
jobStats, err := dh.controller.LaunchJob(jobReq)
if err != nil {
if errs.IsConflictError(err) {
code := http.StatusInternalServerError
if errs.IsBadRequestError(err) {
// Bad request
code = http.StatusBadRequest
} else if errs.IsConflictError(err) {
// Conflict error
dh.handleError(w, req, http.StatusConflict, err)
code = http.StatusConflict
} else {
// General error
dh.handleError(w, req, http.StatusInternalServerError, errs.LaunchJobError(err))
err = errs.LaunchJobError(err)
}
dh.handleError(w, req, code, err)
return
}
@ -98,22 +111,20 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
// HandleGetJobReq is the implementation of the method defined in interface 'Handler'
func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w, req) {
return
}
vars := mux.Vars(req)
jobID := vars["job_id"]
jobStats, err := dh.controller.GetJob(jobID)
if err != nil {
code := http.StatusInternalServerError
backErr := errs.GetJobStatsError(err)
if errs.IsObjectNotFoundError(err) {
code = http.StatusNotFound
backErr = err
} else if errs.IsBadRequestError(err) {
code = http.StatusBadRequest
} else {
err = errs.GetJobStatsError(err)
}
dh.handleError(w, req, code, backErr)
dh.handleError(w, req, code, err)
return
}
@ -122,10 +133,6 @@ func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Reque
// HandleJobActionReq is the implementation of the method defined in interface 'Handler'
func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w, req) {
return
}
vars := mux.Vars(req)
jobID := vars["job_id"]
@ -136,48 +143,30 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re
}
// unmarshal data
jobActionReq := models.JobActionRequest{}
if err = json.Unmarshal(data, &jobActionReq); err != nil {
jobActionReq := &job.ActionRequest{}
if err = json.Unmarshal(data, jobActionReq); err != nil {
dh.handleError(w, req, http.StatusInternalServerError, errs.HandleJSONDataError(err))
return
}
switch jobActionReq.Action {
case opm.CtlCommandStop:
if err := dh.controller.StopJob(jobID); err != nil {
code := http.StatusInternalServerError
backErr := errs.StopJobError(err)
if errs.IsObjectNotFoundError(err) {
code = http.StatusNotFound
backErr = err
}
dh.handleError(w, req, code, backErr)
return
// Only the stop command is supported for now
cmd := job.OPCommand(jobActionReq.Action)
if !cmd.IsStop() {
dh.handleError(w, req, http.StatusNotImplemented, errs.UnknownActionNameError(errors.Errorf("command: %s", jobActionReq.Action)))
return
}
// Stop job
if err := dh.controller.StopJob(jobID); err != nil {
code := http.StatusInternalServerError
if errs.IsObjectNotFoundError(err) {
code = http.StatusNotFound
} else if errs.IsBadRequestError(err) {
code = http.StatusBadRequest
} else {
err = errs.StopJobError(err)
}
case opm.CtlCommandCancel:
if err := dh.controller.CancelJob(jobID); err != nil {
code := http.StatusInternalServerError
backErr := errs.CancelJobError(err)
if errs.IsObjectNotFoundError(err) {
code = http.StatusNotFound
backErr = err
}
dh.handleError(w, req, code, backErr)
return
}
case opm.CtlCommandRetry:
if err := dh.controller.RetryJob(jobID); err != nil {
code := http.StatusInternalServerError
backErr := errs.RetryJobError(err)
if errs.IsObjectNotFoundError(err) {
code = http.StatusNotFound
backErr = err
}
dh.handleError(w, req, code, backErr)
return
}
default:
dh.handleError(w, req, http.StatusNotImplemented, errs.UnknownActionNameError(fmt.Errorf("%s", jobID)))
dh.handleError(w, req, code, err)
return
}
@ -188,10 +177,6 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re
// HandleCheckStatusReq is the implementation of the method defined in interface 'Handler'
func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w, req) {
return
}
stats, err := dh.controller.CheckStatus()
if err != nil {
dh.handleError(w, req, http.StatusInternalServerError, errs.CheckStatsError(err))
@ -203,34 +188,74 @@ func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.
// HandleJobLogReq is the implementation of the method defined in interface 'Handler'
func (dh *DefaultHandler) HandleJobLogReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w, req) {
return
}
vars := mux.Vars(req)
jobID := vars["job_id"]
if strings.Contains(jobID, "..") || strings.ContainsRune(jobID, os.PathSeparator) {
dh.handleError(w, req, http.StatusBadRequest, fmt.Errorf("Invalid Job ID: %s", jobID))
dh.handleError(w, req, http.StatusBadRequest, errors.Errorf("invalid Job ID: %s", jobID))
return
}
logData, err := dh.controller.GetJobLogData(jobID)
if err != nil {
code := http.StatusInternalServerError
backErr := errs.GetJobLogError(err)
if errs.IsObjectNotFoundError(err) {
code = http.StatusNotFound
backErr = err
} else if errs.IsBadRequestError(err) {
code = http.StatusBadRequest
} else {
err = errs.GetJobLogError(err)
}
dh.handleError(w, req, code, backErr)
dh.handleError(w, req, code, err)
return
}
dh.log(req, http.StatusOK, "")
w.WriteHeader(http.StatusOK)
w.Write(logData)
writeDate(w, logData)
}
// HandlePeriodicExecutions is the implementation of the method defined in interface 'Handler'
func (dh *DefaultHandler) HandlePeriodicExecutions(w http.ResponseWriter, req *http.Request) {
// Get param
vars := mux.Vars(req)
jobID := vars["job_id"]
// Get query params
q := extractQuery(req)
executions, total, err := dh.controller.GetPeriodicExecutions(jobID, q)
if err != nil {
code := http.StatusInternalServerError
if errs.IsObjectNotFoundError(err) {
code = http.StatusNotFound
} else if errs.IsBadRequestError(err) {
code = http.StatusBadRequest
} else {
err = errs.GetPeriodicExecutionError(err)
}
dh.handleError(w, req, code, err)
return
}
w.Header().Add(totalHeaderKey, fmt.Sprintf("%d", total))
dh.handleJSONData(w, req, http.StatusOK, executions)
}
// HandleScheduledJobs is the implementation of the method defined in interface 'Handler'
func (dh *DefaultHandler) HandleScheduledJobs(w http.ResponseWriter, req *http.Request) {
// Get query parameters
q := extractQuery(req)
jobs, total, err := dh.controller.ScheduledJobs(q)
if err != nil {
dh.handleError(w, req, http.StatusInternalServerError, errs.GetScheduledJobsError(err))
return
}
w.Header().Add(totalHeaderKey, fmt.Sprintf("%d", total))
dh.handleJSONData(w, req, http.StatusOK, jobs)
}
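Both new list handlers write the total element count into the 'Total-Count' response header and the page items as JSON into the body. Below is a minimal client-side sketch of consuming that contract; the base URL, auth header name and secret value are illustrative assumptions, not part of this change.

// Sketch only: list pending scheduled jobs and read the 'Total-Count' header.
// Assumes imports "io/ioutil", "net/http" and "strconv".
func listScheduledJobs(c *http.Client, baseURL, authHeaderValue string) (int64, []byte, error) {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v1/jobs/scheduled?page_number=1&page_size=25", nil)
	if err != nil {
		return 0, nil, err
	}
	// The real header name and secret scheme come from the jobservice authenticator.
	req.Header.Set("Authorization", authHeaderValue)

	res, err := c.Do(req)
	if err != nil {
		return 0, nil, err
	}
	defer res.Body.Close()

	// Total number of matched scheduled jobs.
	total, _ := strconv.ParseInt(res.Header.Get("Total-Count"), 10, 64)
	// The body carries the JSON encoded page of job stats.
	data, err := ioutil.ReadAll(res.Body)

	return total, data, err
}

The same pattern applies to GET /api/v1/jobs/{job_id}/executions served by HandlePeriodicExecutions.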
func (dh *DefaultHandler) handleJSONData(w http.ResponseWriter, req *http.Request, code int, object interface{}) {
@ -245,7 +270,7 @@ func (dh *DefaultHandler) handleJSONData(w http.ResponseWriter, req *http.Reques
w.Header().Set(http.CanonicalHeaderKey("Accept"), "application/json")
w.Header().Set(http.CanonicalHeaderKey("content-type"), "application/json")
w.WriteHeader(code)
w.Write(data)
writeDate(w, data)
}
func (dh *DefaultHandler) handleError(w http.ResponseWriter, req *http.Request, code int, err error) {
@ -253,18 +278,54 @@ func (dh *DefaultHandler) handleError(w http.ResponseWriter, req *http.Request,
logger.Errorf("Serve http request '%s %s' error: %d %s", req.Method, req.URL.String(), code, err.Error())
w.WriteHeader(code)
w.Write([]byte(err.Error()))
}
func (dh *DefaultHandler) preCheck(w http.ResponseWriter, req *http.Request) bool {
if dh.controller == nil {
dh.handleError(w, req, http.StatusInternalServerError, errs.MissingBackendHandlerError(fmt.Errorf("nil controller")))
return false
}
return true
writeDate(w, []byte(err.Error()))
}
func (dh *DefaultHandler) log(req *http.Request, code int, text string) {
logger.Debugf("Serve http request '%s %s': %d %s", req.Method, req.URL.String(), code, text)
}
func extractQuery(req *http.Request) *query.Parameter {
q := &query.Parameter{
PageNumber: 1,
PageSize: query.DefaultPageSize,
Extras: make(query.ExtraParameters),
}
queries := req.URL.Query()
// Page number
p := queries.Get(query.ParamKeyPage)
if !utils.IsEmptyStr(p) {
if pv, err := strconv.ParseUint(p, 10, 32); err == nil {
if pv > 1 {
q.PageNumber = uint(pv)
}
}
}
// Page size
size := queries.Get(query.ParamKeyPageSize)
if !utils.IsEmptyStr(size) {
if pz, err := strconv.ParseUint(size, 10, 32); err == nil {
if pz > 0 {
q.PageSize = uint(pz)
}
}
}
// Extra query parameters
nonStoppedOnly := queries.Get(query.ParamKeyNonStoppedOnly)
if !utils.IsEmptyStr(nonStoppedOnly) {
if nonStoppedOnlyV, err := strconv.ParseBool(nonStoppedOnly); err == nil {
q.Extras.Set(query.ExtraParamKeyNonStoppedOnly, nonStoppedOnlyV)
}
}
return q
}
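For reference, a short same-package sketch of how the three supported query parameters land in a query.Parameter; the request target is illustrative, and note that page_number only overrides the default when it is greater than 1 while page_size only needs to be greater than 0.

// Sketch only; assumes imports "net/http/httptest" and "testing" inside this api package.
func TestExtractQuerySketch(t *testing.T) {
	req := httptest.NewRequest(
		http.MethodGet,
		"/api/v1/jobs/fake_job_ID/executions?page_number=2&page_size=50&non_dead_only=true",
		nil,
	)

	q := extractQuery(req)

	if q.PageNumber != 2 || q.PageSize != 50 {
		t.Fatalf("unexpected pagination: page=%d size=%d", q.PageNumber, q.PageSize)
	}
	// non_dead_only=true is kept as the 'NonDeadOnly' extra parameter.
	if v, ok := q.Extras.Get(query.ExtraParamKeyNonStoppedOnly); !ok || v != true {
		t.Fatal("expected the NonDeadOnly extra parameter to be true")
	}
}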
func writeDate(w http.ResponseWriter, bytes []byte) {
if _, err := w.Write(bytes); err != nil {
logger.Errorf("writer write error: %s", err)
}
}

View File

@ -18,298 +18,495 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"io/ioutil"
"math/rand"
"net/http"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/models"
)
const fakeSecret = "I'mfakesecret"
const (
secretKey = "CORE_SECRET"
fakeSecret = "I'mfakesecret"
)
var testingAuthProvider = &SecretAuthenticator{}
var testingHandler = NewDefaultHandler(&fakeController{})
var testingRouter = NewBaseRouter(testingHandler, testingAuthProvider)
var client = &http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
MaxIdleConns: 20,
IdleConnTimeout: 30 * time.Second,
},
// APIHandlerTestSuite tests functions of API handler
type APIHandlerTestSuite struct {
suite.Suite
server *Server
controller *fakeController
APIAddr string
client *http.Client
cancel context.CancelFunc
}
func TestUnAuthorizedAccess(t *testing.T) {
exportUISecret("hello")
// SetupSuite prepares test suite
func (suite *APIHandlerTestSuite) SetupSuite() {
_ = os.Setenv(secretKey, fakeSecret)
suite.client = &http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
MaxIdleConns: 20,
IdleConnTimeout: 30 * time.Second,
},
}
suite.createServer()
go func() {
_ = suite.server.Start()
}()
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
res, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port))
if e := expectFormatedError(res, err); e != nil {
t.Fatal(e)
}
if strings.Index(err.Error(), "401") == -1 {
t.Fatalf("expect '401' but got none 401 error")
}
server.Stop()
ctx.WG.Wait()
}
func TestLaunchJobFailed(t *testing.T) {
exportUISecret(fakeSecret)
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
resData, err := postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs", port), createJobReq(false))
if e := expectFormatedError(resData, err); e != nil {
t.Error(e)
}
server.Stop()
ctx.WG.Wait()
// TearDownSuite clears test suite
func (suite *APIHandlerTestSuite) TearDownSuite() {
_ = os.Unsetenv(secretKey)
_ = suite.server.Stop()
suite.cancel()
}
func TestLaunchJobSucceed(t *testing.T) {
exportUISecret(fakeSecret)
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
res, err := postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs", port), createJobReq(true))
if err != nil {
t.Fatal(err)
}
obj, err := getResult(res)
if err != nil {
t.Fatal(err)
}
if obj.Stats.JobID != "fake_ID_ok" {
t.Fatalf("expect job ID 'fake_ID_ok' but got '%s'\n", obj.Stats.JobID)
}
server.Stop()
ctx.WG.Wait()
// TestAPIHandlerTestSuite is suite entry for 'go test'
func TestAPIHandlerTestSuite(t *testing.T) {
suite.Run(t, new(APIHandlerTestSuite))
}
func TestGetJobFailed(t *testing.T) {
exportUISecret(fakeSecret)
// TestUnAuthorizedAccess ...
func (suite *APIHandlerTestSuite) TestUnAuthorizedAccess() {
_ = os.Unsetenv(secretKey)
defer func() {
_ = os.Setenv(secretKey, fakeSecret)
}()
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
res, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port))
if e := expectFormatedError(res, err); e != nil {
t.Fatal(e)
}
server.Stop()
ctx.WG.Wait()
_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job"))
assert.Equal(suite.T(), 401, code, "expect '401' but got a non-401 status code")
}
func TestGetJobSucceed(t *testing.T) {
exportUISecret(fakeSecret)
// TestLaunchJobFailed ...
func (suite *APIHandlerTestSuite) TestLaunchJobFailed() {
req := createJobReq()
bytes, _ := json.Marshal(req)
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
fc1 := &fakeController{}
fc1.On("LaunchJob", req).Return(nil, errs.BadRequestError(req.Job.Name))
suite.controller = fc1
_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
assert.Equal(suite.T(), 400, code, "expect 400 bad request but got %d", code)
res, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port))
if err != nil {
t.Fatal(err)
}
obj, err := getResult(res)
if err != nil {
t.Fatal(err)
}
if obj.Stats.JobName != "testing" || obj.Stats.JobID != "fake_ID_ok" {
t.Fatalf("expect job ID 'fake_ID_ok' of 'testing', but got '%s'\n", obj.Stats.JobID)
}
fc2 := &fakeController{}
fc2.On("LaunchJob", req).Return(nil, errs.ConflictError(req.Job.Name))
suite.controller = fc2
_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
assert.Equal(suite.T(), 409, code, "expect 409 conflict but got %d", code)
server.Stop()
ctx.WG.Wait()
fc3 := &fakeController{}
fc3.On("LaunchJob", req).Return(nil, errs.LaunchJobError(errors.New("testing launch job")))
suite.controller = fc3
_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
assert.Equal(suite.T(), 500, code, "expect 500 internal server error but got %d", code)
}
func TestJobActionFailed(t *testing.T) {
exportUISecret(fakeSecret)
// TestLaunchJobSucceed ...
func (suite *APIHandlerTestSuite) TestLaunchJobSucceed() {
req := createJobReq()
bytes, _ := json.Marshal(req)
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
fc := &fakeController{}
fc.On("LaunchJob", req).Return(createJobStats("sample", "Generic", ""), nil)
suite.controller = fc
actionReq, err := createJobActionReq("stop")
if err != nil {
t.Fatal(err)
}
resData, err := postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port), actionReq)
expectFormatedError(resData, err)
actionReq, err = createJobActionReq("cancel")
if err != nil {
t.Fatal(err)
}
resData, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port), actionReq)
expectFormatedError(resData, err)
actionReq, err = createJobActionReq("retry")
if err != nil {
t.Fatal(err)
}
resData, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port), actionReq)
expectFormatedError(resData, err)
server.Stop()
ctx.WG.Wait()
_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
assert.Equal(suite.T(), 202, code, "expected 202 accepted but got %d when launching job", code)
}
func TestJobActionSucceed(t *testing.T) {
exportUISecret(fakeSecret)
// TestGetJobFailed ...
func (suite *APIHandlerTestSuite) TestGetJobFailed() {
fc := &fakeController{}
fc.On("GetJob", "fake_job_ID").Return(nil, errs.NoObjectFoundError("fake_job_ID"))
suite.controller = fc
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
actionReq, err := createJobActionReq("stop")
if err != nil {
t.Fatal(err)
}
_, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port), actionReq)
if err != nil {
t.Fatal(err)
}
actionReq, err = createJobActionReq("cancel")
if err != nil {
t.Fatal(err)
}
_, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port), actionReq)
if err != nil {
t.Fatal(err)
}
actionReq, err = createJobActionReq("retry")
if err != nil {
t.Fatal(err)
}
_, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port), actionReq)
if err != nil {
t.Fatal(err)
}
server.Stop()
ctx.WG.Wait()
_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"))
assert.Equal(suite.T(), 404, code, "expected 404 not found but got %d when getting job", code)
}
func TestCheckStatus(t *testing.T) {
exportUISecret(fakeSecret)
// TestGetJobSucceed ...
func (suite *APIHandlerTestSuite) TestGetJobSucceed() {
fc := &fakeController{}
fc.On("GetJob", "fake_job_ID").Return(createJobStats("sample", "Generic", ""), nil)
suite.controller = fc
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
resData, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/stats", port))
if err != nil {
t.Fatal(err)
}
poolStats := &models.JobPoolStats{
Pools: make([]*models.JobPoolStatsData, 0),
}
err = json.Unmarshal(resData, poolStats)
if err != nil {
t.Fatal(err)
}
if poolStats.Pools[0].WorkerPoolID != "fake_pool_ID" {
t.Fatalf("expect pool ID 'fake_pool_ID' but got '%s'", poolStats.Pools[0].WorkerPoolID)
}
server.Stop()
ctx.WG.Wait()
res, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"))
require.Equal(suite.T(), 200, code, "expected 200 ok but got %d when getting job", code)
stats, err := getResult(res)
require.Nil(suite.T(), err, "no error should be occurred when unmarshal job stats")
assert.Equal(suite.T(), "fake_job_ID", stats.Info.JobID, "expected job ID 'fake_job_ID' but got %s", stats.Info.JobID)
}
func TestGetJobLogInvalidID(t *testing.T) {
exportUISecret(fakeSecret)
// TestJobActionFailed ...
func (suite *APIHandlerTestSuite) TestJobActionFailed() {
actionReq := createJobActionReq("not-support")
data, _ := json.Marshal(actionReq)
_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"), data)
assert.Equal(suite.T(), 501, code, "expected 501 not implemented but got %d", code)
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
fc1 := &fakeController{}
fc1.On("StopJob", "fake_job_ID_not").Return(errs.NoObjectFoundError("fake_job_ID_not"))
suite.controller = fc1
actionReq = createJobActionReq("stop")
data, _ = json.Marshal(actionReq)
_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID_not"), data)
assert.Equal(suite.T(), 404, code, "expected 404 not found but got %d", code)
_, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/%%2F..%%2Fpasswd/log", port))
if err == nil || strings.Contains(err.Error(), "400") {
t.Fatalf("Expected 400 error but got: %v", err)
}
fc2 := &fakeController{}
fc2.On("StopJob", "fake_job_ID").Return(errs.BadRequestError("fake_job_ID"))
suite.controller = fc2
_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"), data)
assert.Equal(suite.T(), 400, code, "expected 400 bad request but got %d", code)
server.Stop()
ctx.WG.Wait()
fc3 := &fakeController{}
fc3.On("StopJob", "fake_job_ID").Return(errs.StopJobError(errors.New("testing error")))
suite.controller = fc3
_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"), data)
assert.Equal(suite.T(), 500, code, "expected 500 internal server error but got %d", code)
}
func TestGetJobLog(t *testing.T) {
exportUISecret(fakeSecret)
// TestJobActionSucceed ...
func (suite *APIHandlerTestSuite) TestJobActionSucceed() {
fc := &fakeController{}
fc.On("StopJob", "fake_job_ID_not").Return(nil)
suite.controller = fc
actionReq := createJobActionReq("stop")
data, _ := json.Marshal(actionReq)
_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID_not"), data)
assert.Equal(suite.T(), 204, code, "expected 204 no content but got %d", code)
}
server, port, ctx := createServer()
server.Start()
<-time.After(200 * time.Millisecond)
// TestCheckStatus ...
func (suite *APIHandlerTestSuite) TestCheckStatus() {
statsRes := &worker.Stats{
Pools: []*worker.StatsData{
{
WorkerPoolID: "my-worker-pool-ID",
},
},
}
fc := &fakeController{}
fc.On("CheckStatus").Return(statsRes, nil)
suite.controller = fc
resData, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok/log", port))
bytes, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "stats"))
require.Equal(suite.T(), 200, code, "expected 200 ok when getting worker stats but got %d", code)
poolStats := &worker.Stats{
Pools: make([]*worker.StatsData, 0),
}
err := json.Unmarshal(bytes, poolStats)
assert.Nil(suite.T(), err, "no error should be occurred when unmarshal worker stats")
assert.Equal(suite.T(), 1, len(poolStats.Pools), "at least 1 pool exists but got %d", len(poolStats.Pools))
assert.Equal(suite.T(), "my-worker-pool-ID", poolStats.Pools[0].WorkerPoolID, "expected pool ID 'my-worker-pool-ID' but got %s", poolStats.Pools[0].WorkerPoolID)
}
// TestGetJobLogInvalidID ...
func (suite *APIHandlerTestSuite) TestGetJobLogInvalidID() {
fc := &fakeController{}
fc.On("GetJobLogData", "fake_job_ID_not").Return(nil, errs.NoObjectFoundError("fake_job_ID_not"))
suite.controller = fc
_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID_not/log"))
assert.Equal(suite.T(), 404, code, "expected 404 not found but got %d", code)
}
// TestGetJobLog ...
func (suite *APIHandlerTestSuite) TestGetJobLog() {
fc := &fakeController{}
fc.On("GetJobLogData", "fake_job_ID").Return([]byte("hello log"), nil)
suite.controller = fc
resData, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID/log"))
require.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
assert.Equal(suite.T(), "hello log", string(resData))
}
// TestGetPeriodicExecutionsWithoutQuery ...
func (suite *APIHandlerTestSuite) TestGetPeriodicExecutionsWithoutQuery() {
q := &query.Parameter{
PageNumber: 1,
PageSize: query.DefaultPageSize,
Extras: make(query.ExtraParameters),
}
fc := &fakeController{}
fc.On("GetPeriodicExecutions", "fake_job_ID", q).
Return([]*job.Stats{createJobStats("sample", "Generic", "")}, int64(1), nil)
suite.controller = fc
_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID/executions"))
assert.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
}
// TestGetPeriodicExecutionsWithQuery ...
func (suite *APIHandlerTestSuite) TestGetPeriodicExecutionsWithQuery() {
extras := make(query.ExtraParameters)
extras.Set(query.ExtraParamKeyNonStoppedOnly, true)
q := &query.Parameter{
PageNumber: 2,
PageSize: 50,
Extras: extras,
}
fc := &fakeController{}
fc.On("GetPeriodicExecutions", "fake_job_ID", q).
Return([]*job.Stats{createJobStats("sample", "Generic", "")}, int64(1), nil)
suite.controller = fc
_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID/executions?page_number=2&page_size=50&non_dead_only=true"))
assert.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
}
// TestScheduledJobs ...
func (suite *APIHandlerTestSuite) TestScheduledJobs() {
q := &query.Parameter{
PageNumber: 2,
PageSize: 50,
Extras: make(query.ExtraParameters),
}
fc := &fakeController{}
fc.On("ScheduledJobs", q).
Return([]*job.Stats{createJobStats("sample", "Generic", "")}, int64(1), nil)
suite.controller = fc
_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/scheduled?page_number=2&page_size=50"))
assert.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
}
// createServer ...
func (suite *APIHandlerTestSuite) createServer() {
port := uint(30000 + rand.Intn(1000))
suite.APIAddr = fmt.Sprintf("http://localhost:%d/api/v1", port)
config := ServerConfig{
Protocol: "http",
Port: port,
}
ctx, cancel := context.WithCancel(context.Background())
testingRouter := NewBaseRouter(
NewDefaultHandler(suite),
&SecretAuthenticator{},
)
suite.server = NewServer(ctx, testingRouter, config)
suite.cancel = cancel
}
// postReq ...
func (suite *APIHandlerTestSuite) postReq(url string, data []byte) ([]byte, int) {
req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(string(data)))
if err != nil {
t.Fatal(err)
return nil, 0
}
if len(resData) == 0 {
t.Fatal("expect job log but got nothing")
req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
res, err := suite.client.Do(req)
if err != nil {
return nil, 0
}
server.Stop()
ctx.WG.Wait()
}
var (
resData []byte
)
func expectFormatedError(data []byte, err error) error {
if err == nil {
return errors.New("expect error but got nil")
}
if err != nil && len(data) <= 0 {
return errors.New("expect error but got nothing")
}
if err != nil && len(data) > 0 {
var m = make(map[string]interface{})
if err := json.Unmarshal(data, &m); err != nil {
return err
}
if _, ok := m["code"]; !ok {
return errors.New("malformated error")
defer func() {
_ = res.Body.Close()
}()
if res.ContentLength > 0 {
resData, err = ioutil.ReadAll(res.Body)
if err != nil {
return nil, 0
}
}
return nil
return resData, res.StatusCode
}
func createJobReq(ok bool) []byte {
params := make(map[string]interface{})
// getReq ...
func (suite *APIHandlerTestSuite) getReq(url string) ([]byte, int) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, 0
}
req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
res, err := suite.client.Do(req)
if err != nil {
return nil, 0
}
defer func() {
_ = res.Body.Close()
}()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, 0
}
return data, res.StatusCode
}
func (suite *APIHandlerTestSuite) LaunchJob(req *job.Request) (*job.Stats, error) {
return suite.controller.LaunchJob(req)
}
func (suite *APIHandlerTestSuite) GetJob(jobID string) (*job.Stats, error) {
return suite.controller.GetJob(jobID)
}
func (suite *APIHandlerTestSuite) StopJob(jobID string) error {
return suite.controller.StopJob(jobID)
}
func (suite *APIHandlerTestSuite) RetryJob(jobID string) error {
return suite.controller.RetryJob(jobID)
}
func (suite *APIHandlerTestSuite) CheckStatus() (*worker.Stats, error) {
return suite.controller.CheckStatus()
}
func (suite *APIHandlerTestSuite) GetJobLogData(jobID string) ([]byte, error) {
return suite.controller.GetJobLogData(jobID)
}
func (suite *APIHandlerTestSuite) GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error) {
return suite.controller.GetPeriodicExecutions(periodicJobID, query)
}
func (suite *APIHandlerTestSuite) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
return suite.controller.ScheduledJobs(query)
}
type fakeController struct {
mock.Mock
}
func (fc *fakeController) LaunchJob(req *job.Request) (*job.Stats, error) {
args := fc.Called(req)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).(*job.Stats), nil
}
func (fc *fakeController) GetJob(jobID string) (*job.Stats, error) {
args := fc.Called(jobID)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).(*job.Stats), nil
}
func (fc *fakeController) StopJob(jobID string) error {
args := fc.Called(jobID)
return args.Error(0)
}
func (fc *fakeController) RetryJob(jobID string) error {
args := fc.Called(jobID)
return args.Error(0)
}
func (fc *fakeController) CheckStatus() (*worker.Stats, error) {
args := fc.Called()
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).(*worker.Stats), nil
}
func (fc *fakeController) GetJobLogData(jobID string) ([]byte, error) {
args := fc.Called(jobID)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).([]byte), nil
}
func (fc *fakeController) GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error) {
args := fc.Called(periodicJobID, query)
if args.Error(2) != nil {
return nil, args.Get(1).(int64), args.Error(2)
}
return args.Get(0).([]*job.Stats), args.Get(1).(int64), nil
}
func (fc *fakeController) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
args := fc.Called(query)
if args.Error(2) != nil {
return nil, args.Get(1).(int64), args.Error(2)
}
return args.Get(0).([]*job.Stats), args.Get(1).(int64), nil
}
func createJobStats(name, kind, cron string) *job.Stats {
now := time.Now()
params := make(job.Parameters)
params["image"] = "testing:v1"
name := "fake_job_ok"
if !ok {
name = "fake_job_error"
return &job.Stats{
Info: &job.StatsInfo{
JobID: "fake_job_ID",
Status: job.PendingStatus.String(),
JobName: name,
JobKind: kind,
IsUnique: false,
RefLink: "/api/v1/jobs/fake_job_ID",
CronSpec: cron,
RunAt: now.Add(100 * time.Second).Unix(),
EnqueueTime: now.Unix(),
UpdateTime: now.Unix(),
Parameters: params,
},
}
req := &models.JobRequest{
Job: &models.JobData{
Name: name,
}
func getResult(res []byte) (*job.Stats, error) {
obj := &job.Stats{}
err := json.Unmarshal(res, obj)
return obj, err
}
func createJobReq() *job.Request {
params := make(job.Parameters)
params["image"] = "testing:v1"
return &job.Request{
Job: &job.RequestBody{
Name: "my-testing-job",
Parameters: params,
Metadata: &models.JobMetadata{
Metadata: &job.Metadata{
JobKind: "Periodic",
Cron: "5 * * * * *",
IsUnique: true,
@ -317,178 +514,10 @@ func createJobReq(ok bool) []byte {
StatusHook: "http://localhost:39999",
},
}
data, _ := json.Marshal(req)
return data
}
func createJobActionReq(action string) ([]byte, error) {
actionReq := models.JobActionRequest{
func createJobActionReq(action string) *job.ActionRequest {
return &job.ActionRequest{
Action: action,
}
return json.Marshal(&actionReq)
}
func postReq(url string, data []byte) ([]byte, error) {
req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(string(data)))
if err != nil {
return nil, err
}
req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
res, err := client.Do(req)
if err != nil {
return nil, err
}
var (
resData []byte
)
defer res.Body.Close()
if res.ContentLength > 0 {
resData, err = ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
}
if res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusNoContent {
return resData, nil
}
return resData, fmt.Errorf("expect status code '200,201,202,204', but got '%d'", res.StatusCode)
}
func getReq(url string) ([]byte, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusOK {
return data, fmt.Errorf("expect status code '200', but got '%d'", res.StatusCode)
}
return data, nil
}
func exportUISecret(secret string) {
os.Setenv("CORE_SECRET", secret)
}
type fakeController struct{}
func (fc *fakeController) LaunchJob(req models.JobRequest) (models.JobStats, error) {
if req.Job.Name != "fake_job_ok" || req.Job.Metadata == nil {
return models.JobStats{}, errors.New("failed")
}
return createJobStats(req.Job.Name, req.Job.Metadata.JobKind, req.Job.Metadata.Cron), nil
}
func (fc *fakeController) GetJob(jobID string) (models.JobStats, error) {
if jobID != "fake_job_ok" {
return models.JobStats{}, errors.New("failed")
}
return createJobStats("testing", "Generic", ""), nil
}
func (fc *fakeController) StopJob(jobID string) error {
if jobID == "fake_job_ok" {
return nil
}
return errors.New("failed")
}
func (fc *fakeController) RetryJob(jobID string) error {
if jobID == "fake_job_ok" {
return nil
}
return errors.New("failed")
}
func (fc *fakeController) CancelJob(jobID string) error {
if jobID == "fake_job_ok" {
return nil
}
return errors.New("failed")
}
func (fc *fakeController) CheckStatus() (models.JobPoolStats, error) {
return models.JobPoolStats{
Pools: []*models.JobPoolStatsData{{
WorkerPoolID: "fake_pool_ID",
Status: "running",
StartedAt: time.Now().Unix(),
}},
}, nil
}
func (fc *fakeController) GetJobLogData(jobID string) ([]byte, error) {
if jobID == "fake_job_ok" {
return []byte("job log"), nil
}
return nil, errors.New("failed")
}
func createJobStats(name, kind, cron string) models.JobStats {
now := time.Now()
return models.JobStats{
Stats: &models.JobStatData{
JobID: "fake_ID_ok",
Status: "pending",
JobName: name,
JobKind: kind,
IsUnique: false,
RefLink: "/api/v1/jobs/fake_ID_ok",
CronSpec: cron,
RunAt: now.Add(100 * time.Second).Unix(),
EnqueueTime: now.Unix(),
UpdateTime: now.Unix(),
},
}
}
func getResult(res []byte) (models.JobStats, error) {
obj := models.JobStats{}
err := json.Unmarshal(res, &obj)
return obj, err
}
func createServer() (*Server, uint, *env.Context) {
port := uint(30000 + rand.Intn(10000))
config := ServerConfig{
Protocol: "http",
Port: port,
}
ctx := &env.Context{
SystemContext: context.Background(),
WG: new(sync.WaitGroup),
ErrorChan: make(chan error, 1),
}
server := NewServer(ctx, testingRouter, config)
return server, port, ctx
}

View File

@ -16,6 +16,7 @@ package api
import (
"fmt"
"github.com/pkg/errors"
"net/http"
"github.com/goharbor/harbor/src/jobservice/errs"
@ -68,9 +69,12 @@ func (br *BaseRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if req.URL.String() != fmt.Sprintf("%s/%s/stats", baseRoute, apiVersion) {
if err := br.authenticator.DoAuth(req); err != nil {
authErr := errs.UnauthorizedError(err)
if authErr == nil {
authErr = errors.Errorf("unauthorized: %s", err)
}
logger.Errorf("Serve http request '%s %s' failed with error: %s", req.Method, req.URL.String(), authErr.Error())
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte(authErr.Error()))
writeDate(w, []byte(authErr.Error()))
return
}
}
@ -84,8 +88,10 @@ func (br *BaseRouter) registerRoutes() {
subRouter := br.router.PathPrefix(fmt.Sprintf("%s/%s", baseRoute, apiVersion)).Subrouter()
subRouter.HandleFunc("/jobs", br.handler.HandleLaunchJobReq).Methods(http.MethodPost)
subRouter.HandleFunc("/jobs/scheduled", br.handler.HandleScheduledJobs).Methods(http.MethodGet)
subRouter.HandleFunc("/jobs/{job_id}", br.handler.HandleGetJobReq).Methods(http.MethodGet)
subRouter.HandleFunc("/jobs/{job_id}", br.handler.HandleJobActionReq).Methods(http.MethodPost)
subRouter.HandleFunc("/jobs/{job_id}/log", br.handler.HandleJobLogReq).Methods(http.MethodGet)
subRouter.HandleFunc("/stats", br.handler.HandleCheckStatusReq).Methods(http.MethodGet)
subRouter.HandleFunc("/jobs/{job_id}/executions", br.handler.HandlePeriodicExecutions).Methods(http.MethodGet)
}

View File

@ -15,14 +15,13 @@
package api
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"time"
"context"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/logger"
)
@ -38,7 +37,7 @@ type Server struct {
config ServerConfig
// The context
context *env.Context
context context.Context
}
// ServerConfig contains the configurations of Server.
@ -57,7 +56,7 @@ type ServerConfig struct {
}
// NewServer is constructor of Server.
func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
func NewServer(ctx context.Context, router Router, cfg ServerConfig) *Server {
apiServer := &Server{
router: router,
config: cfg,
@ -96,39 +95,23 @@ func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
}
// Start the server to serve requests.
func (s *Server) Start() {
s.context.WG.Add(1)
go func() {
var err error
defer func() {
s.context.WG.Done()
logger.Infof("API server is gracefully shutdown")
}()
if s.config.Protocol == config.JobServiceProtocolHTTPS {
err = s.httpServer.ListenAndServeTLS(s.config.Cert, s.config.Key)
} else {
err = s.httpServer.ListenAndServe()
}
if err != nil {
s.context.ErrorChan <- err
}
// Blocking call
func (s *Server) Start() error {
defer func() {
logger.Info("API server is stopped")
}()
if s.config.Protocol == config.JobServiceProtocolHTTPS {
return s.httpServer.ListenAndServeTLS(s.config.Cert, s.config.Key)
}
return s.httpServer.ListenAndServe()
}
// Stop server gracefully.
func (s *Server) Stop() {
go func() {
defer func() {
logger.Info("Stop API server done!")
}()
shutDownCtx, cancel := context.WithTimeout(s.context.SystemContext, 10*time.Second)
defer cancel()
func (s *Server) Stop() error {
shutDownCtx, cancel := context.WithTimeout(s.context, 15*time.Second)
defer cancel()
if err := s.httpServer.Shutdown(shutDownCtx); err != nil {
logger.Errorf("Shutdown API server failed with error: %s\n", err)
}
}()
return s.httpServer.Shutdown(shutDownCtx)
}
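With Start now being a blocking call and both Start and Stop returning errors, the caller owns the serving goroutine and the shutdown trigger. A minimal wiring sketch mirroring what the API test suite does; the config values, signal handling and package placement are assumptions for illustration.

// Sketch only: compose handler, router and server, then run and stop them.
// Assumes this sits alongside the api package, imports "context", "os", "os/signal"
// and "syscall", and that a concrete core.Interface implementation 'ctl' is available.
func runAPIServer(ctl core.Interface) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg := ServerConfig{
		Protocol: "http", // placeholder
		Port:     8080,   // placeholder
	}

	router := NewBaseRouter(NewDefaultHandler(ctl), &SecretAuthenticator{})
	server := NewServer(ctx, router, cfg)

	errCh := make(chan error, 1)
	go func() {
		// Blocks until the listener fails or Stop() shuts the server down.
		errCh <- server.Start()
	}()

	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)

	select {
	case err := <-errCh:
		return err
	case <-stop:
		// Graceful shutdown; Stop applies the 15s timeout derived from ctx.
		return server.Stop()
	}
}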

View File

@ -0,0 +1,52 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package query
const (
// DefaultPageSize defines the default page size
DefaultPageSize uint = 25
// ParamKeyPage defines query param key of page number
ParamKeyPage = "page_number"
// ParamKeyPageSize defines query param key of page size
ParamKeyPageSize = "page_size"
// ParamKeyNonStoppedOnly defines the query param key for querying non-stopped periodic executions
ParamKeyNonStoppedOnly = "non_dead_only"
// ExtraParamKeyNonStoppedOnly defines the extra parameter key for querying non-stopped periodic executions
ExtraParamKeyNonStoppedOnly = "NonDeadOnly"
)
// ExtraParameters keeps the non-pagination query parameters
type ExtraParameters map[string]interface{}
// Set extra parameters
func (ep ExtraParameters) Set(key string, v interface{}) {
if len(key) > 0 {
ep[key] = v
}
}
// Get the extra parameter by key
func (ep ExtraParameters) Get(key string) (interface{}, bool) {
v, ok := ep[key]
return v, ok
}
// Parameter for getting executions
type Parameter struct {
PageNumber uint
PageSize uint
Extras ExtraParameters
}
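On the other side of the API, controller methods receive this same structure. A tiny sketch of the parameter extractQuery would produce for "page 2, 50 items per page, non-stopped executions only" follows; it assumes an import of "github.com/goharbor/harbor/src/jobservice/common/query".

// Sketch only.
func exampleParameter() *query.Parameter {
	extras := make(query.ExtraParameters)
	extras.Set(query.ExtraParamKeyNonStoppedOnly, true)

	return &query.Parameter{
		PageNumber: 2,
		PageSize:   50,
		Extras:     extras,
	}
}

A controller implementation would then receive it via e.g. GetPeriodicExecutions(periodicJobID, exampleParameter()).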

View File

@ -0,0 +1,29 @@
package query
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// QueryTestSuite tests the query parameters
type QueryTestSuite struct {
suite.Suite
}
// TestQueryTestSuite is entry of go test
func TestQueryTestSuite(t *testing.T) {
suite.Run(t, new(QueryTestSuite))
}
// TestExtraParams tests extra parameters
func (suite *QueryTestSuite) TestExtraParams() {
extras := make(ExtraParameters)
extras.Set("a", 100)
v, ok := extras.Get("a")
assert.Equal(suite.T(), true, ok)
assert.Equal(suite.T(), 100, v.(int))
}

View File

@ -12,27 +12,33 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
package rds
import (
"fmt"
"math/rand"
"strings"
"time"
)
func generateScore() int64 {
ticks := time.Now().Unix()
rand := rand.New(rand.NewSource(ticks))
return ticks + rand.Int63n(1000) // Double confirm to avoid potential duplications
// Functions defined here are mainly borrowed from the dep lib "github.com/gocraft/work",
// kept only for compatibility.
// RedisNamespacePrefix ... Same as 'KeyNamespacePrefix', kept only for compatibility.
func RedisNamespacePrefix(namespace string) string {
return KeyNamespacePrefix(namespace)
}
// MakePeriodicPolicyUUID returns an UUID for the periodic policy.
func MakePeriodicPolicyUUID() (string, int64) {
score := generateScore()
return MakeIdentifier(), score
// RedisKeyScheduled returns key of scheduled job.
func RedisKeyScheduled(namespace string) string {
return RedisNamespacePrefix(namespace) + "scheduled"
}
// RedisKeyLastPeriodicEnqueue returns the key of the timestamp of the last periodic enqueue.
func RedisKeyLastPeriodicEnqueue(namespace string) string {
return RedisNamespacePrefix(namespace) + "last_periodic_enqueue"
}
// ----------------------------------------------------------
// KeyNamespacePrefix returns the base key prefix derived from the namespace.
func KeyNamespacePrefix(namespace string) string {
ns := strings.TrimSpace(namespace)
@ -53,16 +59,6 @@ func KeyPeriodicPolicy(namespace string) string {
return fmt.Sprintf("%s:%s", KeyPeriod(namespace), "policies")
}
// KeyPeriodicPolicyScore returns the key of policy key and score mapping.
func KeyPeriodicPolicyScore(namespace string) string {
return fmt.Sprintf("%s:%s", KeyPeriod(namespace), "key_score")
}
// KeyPeriodicJobTimeSlots returns the key of the time slots of scheduled jobs.
func KeyPeriodicJobTimeSlots(namespace string) string {
return fmt.Sprintf("%s:%s", KeyPeriod(namespace), "scheduled_slots")
}
// KeyPeriodicNotification returns the key of periodic pub/sub channel.
func KeyPeriodicNotification(namespace string) string {
return fmt.Sprintf("%s:%s", KeyPeriodicPolicy(namespace), "notifications")
@ -78,12 +74,17 @@ func KeyJobStats(namespace string, jobID string) string {
return fmt.Sprintf("%s%s:%s", KeyNamespacePrefix(namespace), "job_stats", jobID)
}
// KeyJobCtlCommands returns the key for publishing ctl commands like 'stop' etc.
func KeyJobCtlCommands(namespace string, jobID string) string {
return fmt.Sprintf("%s%s:%s", KeyNamespacePrefix(namespace), "ctl_commands", jobID)
}
// KeyUpstreamJobAndExecutions returns the key for persisting executions.
func KeyUpstreamJobAndExecutions(namespace, upstreamJobID string) string {
return fmt.Sprintf("%s%s:%s", KeyNamespacePrefix(namespace), "executions", upstreamJobID)
}
// KeyHookEventRetryQueue returns the key of hook event retrying queue
func KeyHookEventRetryQueue(namespace string) string {
return fmt.Sprintf("%s%s", KeyNamespacePrefix(namespace), "hook_events")
}
// KeyStatusUpdateRetryQueue returns the key of status change retrying queue
func KeyStatusUpdateRetryQueue(namespace string) string {
return fmt.Sprintf("%s%s", KeyNamespacePrefix(namespace), "status_change_events")
}
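All of the helpers above build on the same namespace prefix. Assuming the prefix normalizes to '<namespace>:' (the gocraft/work convention this file mirrors via KeyNamespacePrefix), the produced keys look roughly as follows; the namespace and IDs are illustrative only.

// Sketch only; assumes an import of this rds package.
const ns = "harbor_job_service_namespace"

var exampleKeys = []string{
	rds.KeyJobStats(ns, "8e5b89a6e15e"),               // "<ns>:job_stats:8e5b89a6e15e"
	rds.KeyUpstreamJobAndExecutions(ns, "periodic_1"), // "<ns>:executions:periodic_1"
	rds.KeyHookEventRetryQueue(ns),                    // "<ns>:hook_events"
	rds.KeyStatusUpdateRetryQueue(ns),                 // "<ns>:status_change_events"
}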

View File

@ -0,0 +1,156 @@
package rds
import (
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/pkg/errors"
"time"
)
// ErrNoElements is a predefined error to describe the case that no elements were
// retrieved from the backend database.
var ErrNoElements = errors.New("no elements got from the backend")
// HmSet sets the properties of hash map
func HmSet(conn redis.Conn, key string, fieldAndValues ...interface{}) error {
if conn == nil {
return errors.New("nil redis connection")
}
if utils.IsEmptyStr(key) {
return errors.New("no key specified to do HMSET")
}
if len(fieldAndValues) == 0 {
return errors.New("no properties specified to do HMSET")
}
args := make([]interface{}, 0, len(fieldAndValues)+2)
args = append(args, key)
args = append(args, fieldAndValues...)
args = append(args, "update_time", time.Now().Unix()) // Add update timestamp
_, err := conn.Do("HMSET", args...)
return err
}
// HmGet gets the values of multiple fields
// Values are returned in the same order as the provided fields
func HmGet(conn redis.Conn, key string, fields ...interface{}) ([]interface{}, error) {
if conn == nil {
return nil, errors.New("nil redis connection")
}
if utils.IsEmptyStr(key) {
return nil, errors.New("no key specified to do HMGET")
}
if len(fields) == 0 {
return nil, errors.New("no fields specified to do HMGET")
}
args := make([]interface{}, 0, len(fields)+1)
args = append(args, key)
args = append(args, fields...)
return redis.Values(conn.Do("HMGET", args...))
}
// JobScore represents the data item with score in the redis db.
type JobScore struct {
JobBytes []byte
Score int64
}
// GetZsetByScore gets the items from the zset filtered by the specified score scope.
func GetZsetByScore(conn redis.Conn, key string, scores []int64) ([]JobScore, error) {
if conn == nil {
return nil, errors.New("nil redis conn when getting zset by score")
}
if utils.IsEmptyStr(key) {
return nil, errors.New("missing key when getting zset by score")
}
if len(scores) < 2 {
return nil, errors.New("bad arguments: not enough scope scores provided")
}
values, err := redis.Values(conn.Do("ZRANGEBYSCORE", key, scores[0], scores[1], "WITHSCORES"))
if err != nil {
return nil, err
}
var jobsWithScores []JobScore
if err := redis.ScanSlice(values, &jobsWithScores); err != nil {
return nil, err
}
return jobsWithScores, nil
}
// AcquireLock acquires a redis lock with the specified expire time
func AcquireLock(conn redis.Conn, lockerKey string, lockerID string, expireTime int64) error {
args := []interface{}{lockerKey, lockerID, "NX", "EX", expireTime}
res, err := conn.Do("SET", args...)
if err != nil {
return err
}
// Key already exists, so the value cannot be overridden
if res == nil {
return fmt.Errorf("key %s is already set with value %v", lockerKey, lockerID)
}
return nil
}
// ReleaseLock releases the acquired lock
func ReleaseLock(conn redis.Conn, lockerKey string, lockerID string) error {
theID, err := redis.String(conn.Do("GET", lockerKey))
if err != nil {
return err
}
if theID == lockerID {
_, err := conn.Do("DEL", lockerKey)
return err
}
return errors.New("locker ID mismatch")
}
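AcquireLock is the standard 'SET key value NX EX ttl' pattern, so only the first writer wins until the key expires, and ReleaseLock deletes the key only when the stored locker ID matches (the GET-then-DEL pair is not atomic, which is tolerable here because the lock also expires on its own). A brief usage sketch with placeholder names:

// Sketch only: guard a critical section with the lock helpers.
// KeyPeriodicLock is the lock-key helper exercised by the rds test suite.
func withPeriodicLock(conn redis.Conn, ns string, doWork func() error) error {
	lockerKey := rds.KeyPeriodicLock(ns)
	lockerID := utils.MakeIdentifier() // unique owner ID

	// Held for at most 60 seconds even if the owner dies.
	if err := rds.AcquireLock(conn, lockerKey, lockerID, 60); err != nil {
		return err // someone else currently holds the lock
	}
	defer func() {
		// Best effort release; only the matching owner ID can delete the key.
		_ = rds.ReleaseLock(conn, lockerKey, lockerID)
	}()

	return doWork()
}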
// ZPopMin pops the element with the lowest score in the zset
func ZPopMin(conn redis.Conn, key string) (interface{}, error) {
err := conn.Send("MULTI")
err = conn.Send("ZRANGE", key, 0, 0) // lowest one
err = conn.Send("ZREMRANGEBYRANK", key, 0, 0)
if err != nil {
return nil, err
}
replies, err := redis.Values(conn.Do("EXEC"))
if err != nil {
return nil, err
}
if len(replies) < 2 {
return nil, errors.Errorf("zpopmin error: not enough results returned, expected %d but got %d", 2, len(replies))
}
zrangeReply := replies[0]
if zrangeReply != nil {
if elements, ok := zrangeReply.([]interface{}); ok {
if len(elements) == 0 {
return nil, ErrNoElements
}
return elements[0], nil
}
}
return nil, errors.New("zpopmin error: bad result reply")
}
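Callers of ZPopMin should treat ErrNoElements as the normal empty-queue case rather than as a failure, for example (sketch only; the queue key and handler are illustrative):

// Sketch only: drain the status-update retry queue until it is empty.
func drainStatusQueue(conn redis.Conn, ns string, handle func(raw []byte) error) error {
	key := rds.KeyStatusUpdateRetryQueue(ns)
	for {
		element, err := rds.ZPopMin(conn, key)
		if err != nil {
			if err == rds.ErrNoElements {
				return nil // queue drained
			}
			return err
		}
		// ZRANGE replies are bulk strings, surfaced by redigo as []byte.
		if raw, ok := element.([]byte); ok {
			if err := handle(raw); err != nil {
				return err
			}
		}
	}
}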

View File

@ -0,0 +1,139 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rds
import (
"encoding/json"
"github.com/goharbor/harbor/src/jobservice/tests"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"testing"
"time"
)
// For testing
type simpleStatusChange struct {
JobID string
}
// RdsUtilsTestSuite tests functions located in rds package
type RdsUtilsTestSuite struct {
suite.Suite
pool *redis.Pool
namespace string
conn redis.Conn
}
// SetupSuite prepares test suite
func (suite *RdsUtilsTestSuite) SetupSuite() {
suite.pool = tests.GiveMeRedisPool()
suite.namespace = tests.GiveMeTestNamespace()
}
// SetupTest prepares test cases
func (suite *RdsUtilsTestSuite) SetupTest() {
suite.conn = suite.pool.Get()
}
// TearDownTest clears test cases
func (suite *RdsUtilsTestSuite) TearDownTest() {
err := suite.conn.Close()
assert.NoError(suite.T(), err, "close conn: nil error expected but got %s", err)
}
// TearDownSuite clears test suite
func (suite *RdsUtilsTestSuite) TearDownSuite() {
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()
err := tests.ClearAll(suite.namespace, conn)
assert.NoError(suite.T(), err, "clear all: nil error expected but got %s", err)
}
// TestZPopMin ...
func (suite *RdsUtilsTestSuite) TestZPopMin() {
s1 := &simpleStatusChange{"a"}
s2 := &simpleStatusChange{"b"}
raw1, _ := json.Marshal(s1)
raw2, _ := json.Marshal(s2)
key := KeyStatusUpdateRetryQueue(suite.namespace)
_, err := suite.conn.Do("ZADD", key, time.Now().Unix(), raw1)
_, err = suite.conn.Do("ZADD", key, time.Now().Unix()+5, raw2)
require.Nil(suite.T(), err, "zadd objects error should be nil")
v, err := ZPopMin(suite.conn, key)
require.Nil(suite.T(), err, "nil error should be returned by calling ZPopMin")
change1 := &simpleStatusChange{}
_ = json.Unmarshal(v.([]byte), change1)
assert.Equal(suite.T(), "a", change1.JobID, "job ID not equal")
v, err = ZPopMin(suite.conn, key)
require.Nil(suite.T(), err, "nil error should be returned by calling ZPopMin")
change2 := &simpleStatusChange{}
_ = json.Unmarshal(v.([]byte), change2)
assert.Equal(suite.T(), "b", change2.JobID, "job ID not equal")
}
// TestHmGetAndSet ...
func (suite *RdsUtilsTestSuite) TestHmGetAndSet() {
key := KeyJobStats(suite.namespace, "fake_job_id")
err := HmSet(suite.conn, key, "a", "hello", "b", 100)
require.Nil(suite.T(), err, "nil error should be returned for HmSet")
values, err := HmGet(suite.conn, key, "a", "b")
require.Nil(suite.T(), err, "nil error should be returned for HmGet")
assert.Equal(suite.T(), 2, len(values), "two values should be returned")
assert.Equal(suite.T(), string(values[0].([]byte)), "hello")
assert.Equal(suite.T(), string(values[1].([]byte)), "100")
}
// TestAcquireAndReleaseLock ...
func (suite *RdsUtilsTestSuite) TestAcquireAndReleaseLock() {
key := KeyPeriodicLock(suite.namespace)
err := AcquireLock(suite.conn, key, "RdsUtilsTestSuite", 60)
assert.Nil(suite.T(), err, "nil error should be returned for 1st acquiring lock")
err = AcquireLock(suite.conn, key, "RdsUtilsTestSuite", 60)
assert.NotNil(suite.T(), err, "non nil error should be returned for 2nd acquiring lock")
err = ReleaseLock(suite.conn, key, "RdsUtilsTestSuite")
assert.Nil(suite.T(), err, "nil error should be returned for releasing lock")
}
// TestGetZsetByScore ...
func (suite *RdsUtilsTestSuite) TestGetZsetByScore() {
key := KeyPeriod(suite.namespace)
count, err := suite.conn.Do("ZADD", key, 1, "hello", 2, "world")
require.Nil(suite.T(), err, "nil error should be returned when adding prepared data by ZADD")
require.Equal(suite.T(), int64(2), count.(int64), "two items should be added")
datas, err := GetZsetByScore(suite.conn, key, []int64{0, 2})
require.Nil(suite.T(), err, "nil error should be returned when getting data with scores")
assert.Equal(suite.T(), 2, len(datas), "expected 2 items but got %d", len(datas))
}
// TestRdsUtilsTestSuite is suite entry for 'go test'
func TestRdsUtilsTestSuite(t *testing.T) {
suite.Run(t, new(RdsUtilsTestSuite))
}

View File

@ -16,24 +16,37 @@
package utils
import (
"errors"
"crypto/rand"
"encoding/json"
"fmt"
"github.com/gocraft/work"
"github.com/pkg/errors"
"io"
"net"
"net/url"
"os"
"strconv"
"strings"
"github.com/gomodule/redigo/redis"
)
// CtlContextKey is used to keep controller reference in the system context
type CtlContextKey string
// NodeIDContextKey is used to keep node ID in the system context
type NodeIDContextKey string
const (
// CtlKeyOfLaunchJobFunc is context key to keep the ctl launch job func
CtlKeyOfLaunchJobFunc CtlContextKey = "controller_launch_job_func"
// NodeID is const of the ID context key
NodeID NodeIDContextKey = "node_id"
)
// MakeIdentifier creates uuid for job.
func MakeIdentifier() string {
b := make([]byte, 12)
_, err := io.ReadFull(rand.Reader, b)
if err != nil {
return ""
}
return fmt.Sprintf("%x", b)
}
// IsEmptyStr checks if the specified str is empty (len == 0) after trimming prefix and suffix spaces.
func IsEmptyStr(str string) bool {
return len(strings.TrimSpace(str)) == 0
@ -105,7 +118,7 @@ func TranslateRedisAddress(commaFormat string) (string, bool) {
return "", false
}
urlParts := []string{}
urlParts := make([]string, 0)
// section[0] should be host:port
redisURL := fmt.Sprintf("redis://%s", sections[0])
if _, err := url.Parse(redisURL); err != nil {
@ -127,31 +140,48 @@ func TranslateRedisAddress(commaFormat string) (string, bool) {
return strings.Join(urlParts, ""), true
}
// JobScore represents the data item with score in the redis db.
type JobScore struct {
JobBytes []byte
Score int64
// SerializeJob encodes work.Job to json data.
func SerializeJob(job *work.Job) ([]byte, error) {
return json.Marshal(job)
}
// GetZsetByScore get the items from the zset filtered by the specified score scope.
func GetZsetByScore(pool *redis.Pool, key string, scores []int64) ([]JobScore, error) {
if pool == nil || IsEmptyStr(key) || len(scores) < 2 {
return nil, errors.New("bad arguments")
}
// DeSerializeJob decodes bytes to ptr of work.Job.
func DeSerializeJob(jobBytes []byte) (*work.Job, error) {
var j work.Job
err := json.Unmarshal(jobBytes, &j)
conn := pool.Get()
defer conn.Close()
return &j, err
}
values, err := redis.Values(conn.Do("ZRANGEBYSCORE", key, scores[0], scores[1], "WITHSCORES"))
// ResolveHostnameAndIP gets the local hostname and IP
func ResolveHostnameAndIP() (string, error) {
host, err := os.Hostname()
if err != nil {
return nil, err
return "", err
}
var jobsWithScores []JobScore
if err := redis.ScanSlice(values, &jobsWithScores); err != nil {
return nil, err
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", err
}
return jobsWithScores, nil
for _, address := range addrs {
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return fmt.Sprintf("%s:%s", host, ipnet.IP.String()), nil
}
}
}
return "", errors.New("failed to resolve local host&ip")
}
// GenerateNodeID returns ID of current node
func GenerateNodeID() string {
hIP, err := ResolveHostnameAndIP()
if err != nil {
return MakeIdentifier()
}
return hIP
}

View File

@ -10,7 +10,7 @@ https_config:
#Server listening port
port: 9443
#Worker pool
worker_pool:
#Worker concurrency
workers: 10
@ -29,17 +29,14 @@ job_loggers:
- name: "FILE"
level: "DEBUG"
settings: # Customized settings of logger
base_dir: "/tmp/job_logs"
base_dir: "/Users/szou/tmp/job_logs"
sweeper:
duration: 1 #days
settings: # Customized settings of sweeper
work_dir: "/tmp/job_logs"
work_dir: "/Users/szou/tmp/job_logs"
#Loggers for the job service
loggers:
- name: "STD_OUTPUT" # Same with above
level: "DEBUG"
#Admin server endpoint
admin_server: "http://adminserver:9010/"

View File

@ -23,21 +23,20 @@ import (
"strconv"
"strings"
"github.com/goharbor/harbor/src/jobservice/utils"
yaml "gopkg.in/yaml.v2"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"gopkg.in/yaml.v2"
)
const (
jobServiceProtocol = "JOB_SERVICE_PROTOCOL"
jobServicePort = "JOB_SERVICE_PORT"
jobServiceHTTPCert = "JOB_SERVICE_HTTPS_CERT"
jobServiceHTTPKey = "JOB_SERVICE_HTTPS_KEY"
jobServiceWorkerPoolBackend = "JOB_SERVICE_POOL_BACKEND"
jobServiceWorkers = "JOB_SERVICE_POOL_WORKERS"
jobServiceRedisURL = "JOB_SERVICE_POOL_REDIS_URL"
jobServiceRedisNamespace = "JOB_SERVICE_POOL_REDIS_NAMESPACE"
jobServiceCoreServerEndpoint = "CORE_URL"
jobServiceAuthSecret = "JOBSERVICE_SECRET"
jobServiceProtocol = "JOB_SERVICE_PROTOCOL"
jobServicePort = "JOB_SERVICE_PORT"
jobServiceHTTPCert = "JOB_SERVICE_HTTPS_CERT"
jobServiceHTTPKey = "JOB_SERVICE_HTTPS_KEY"
jobServiceWorkerPoolBackend = "JOB_SERVICE_POOL_BACKEND"
jobServiceWorkers = "JOB_SERVICE_POOL_WORKERS"
jobServiceRedisURL = "JOB_SERVICE_POOL_REDIS_URL"
jobServiceRedisNamespace = "JOB_SERVICE_POOL_REDIS_NAMESPACE"
jobServiceAuthSecret = "JOBSERVICE_SECRET"
// JobServiceProtocolHTTPS points to the 'https' protocol
JobServiceProtocolHTTPS = "https"
@ -68,7 +67,7 @@ type Configuration struct {
// Additional config when using https
HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"`
// Configurations of worker pool
PoolConfig *PoolConfig `yaml:"worker_pool,omitempty"`
// Job logger configurations
@ -84,13 +83,13 @@ type HTTPSConfig struct {
Key string `yaml:"key"`
}
// RedisPoolConfig keeps redis pool info.
type RedisPoolConfig struct {
RedisURL string `yaml:"redis_url"`
Namespace string `yaml:"namespace"`
}
// PoolConfig keeps worker pool configurations.
type PoolConfig struct {
// Worker concurrency
WorkerCount uint `yaml:"workers"`
@ -274,32 +273,32 @@ func (c *Configuration) validate() error {
}
if c.PoolConfig == nil {
return errors.New("no worker pool is configured")
return errors.New("no worker worker is configured")
}
if c.PoolConfig.Backend != JobServicePoolBackendRedis {
return fmt.Errorf("worker pool backend %s does not support", c.PoolConfig.Backend)
return fmt.Errorf("worker worker backend %s does not support", c.PoolConfig.Backend)
}
// When backend is redis
if c.PoolConfig.Backend == JobServicePoolBackendRedis {
if c.PoolConfig.RedisPoolCfg == nil {
return fmt.Errorf("redis pool must be configured when backend is set to '%s'", c.PoolConfig.Backend)
return fmt.Errorf("redis worker must be configured when backend is set to '%s'", c.PoolConfig.Backend)
}
if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.RedisURL) {
return errors.New("URL of redis pool is empty")
return errors.New("URL of redis worker is empty")
}
if !strings.HasPrefix(c.PoolConfig.RedisPoolCfg.RedisURL, redisSchema) {
return errors.New("Invalid redis URL")
return errors.New("invalid redis URL")
}
if _, err := url.Parse(c.PoolConfig.RedisPoolCfg.RedisURL); err != nil {
return fmt.Errorf("Invalid redis URL: %s", err.Error())
return fmt.Errorf("invalid redis URL: %s", err.Error())
}
if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.Namespace) {
return errors.New("namespace of redis pool is required")
return errors.New("namespace of redis worker is required")
}
}
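For orientation, a minimal usage sketch (not part of this change) of loading the configuration with environment overrides enabled; the config path is illustrative and Load is assumed to apply the overrides and run the validation shown above.
package main

import (
	"log"

	"github.com/goharbor/harbor/src/jobservice/config"
)

func main() {
	cfg := &config.Configuration{}
	// true enables the JOB_SERVICE_* environment overrides listed in the
	// constants above; Load is assumed to apply them and validate the result.
	if err := cfg.Load("/etc/jobservice/config.yml", true); err != nil {
		log.Fatalf("failed to load job service configuration: %v", err)
	}
	log.Printf("job service will serve %s on port %d", cfg.Protocol, cfg.Port)
}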

View File

@ -14,121 +14,141 @@
package config
import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"os"
"testing"
)
func TestConfigLoadingFailed(t *testing.T) {
cfg := &Configuration{}
if err := cfg.Load("./config.not-existing.yaml", false); err == nil {
t.Fatalf("Load config from none-existing document, expect none nil error but got '%s'\n", err)
}
// ConfigurationTestSuite tests the configuration loading
type ConfigurationTestSuite struct {
suite.Suite
}
func TestConfigLoadingSucceed(t *testing.T) {
cfg := &Configuration{}
if err := cfg.Load("../config_test.yml", false); err != nil {
t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
}
// TestConfigurationTestSuite is suite entry for 'go test'
func TestConfigurationTestSuite(t *testing.T) {
suite.Run(t, new(ConfigurationTestSuite))
}
func TestConfigLoadingWithEnv(t *testing.T) {
setENV()
// TestConfigLoadingFailed ...
func (suite *ConfigurationTestSuite) TestConfigLoadingFailed() {
cfg := &Configuration{}
if err := cfg.Load("../config_test.yml", true); err != nil {
t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
}
if cfg.Protocol != "https" {
t.Errorf("expect protocol 'https', but got '%s'\n", cfg.Protocol)
}
if cfg.Port != 8989 {
t.Errorf("expect port 8989 but got '%d'\n", cfg.Port)
}
if cfg.PoolConfig.WorkerCount != 8 {
t.Errorf("expect workcount 8 but go '%d'\n", cfg.PoolConfig.WorkerCount)
}
if cfg.PoolConfig.RedisPoolCfg.RedisURL != "redis://arbitrary_username:password@8.8.8.8:6379/0" {
t.Errorf("expect redis URL 'localhost' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.RedisURL)
}
if cfg.PoolConfig.RedisPoolCfg.Namespace != "ut_namespace" {
t.Errorf("expect redis namespace 'ut_namespace' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.Namespace)
}
if GetAuthSecret() != "js_secret" {
t.Errorf("expect auth secret 'js_secret' but got '%s'", GetAuthSecret())
}
if GetUIAuthSecret() != "core_secret" {
t.Errorf("expect auth secret 'core_secret' but got '%s'", GetUIAuthSecret())
}
unsetENV()
err := cfg.Load("./config.not-existing.yaml", false)
assert.NotNil(suite.T(), err, "load config from none-existing document, expect none nil error but got nil")
}
func TestDefaultConfig(t *testing.T) {
if err := DefaultConfig.Load("../config_test.yml", true); err != nil {
t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
}
// TestConfigLoadingSucceed ...
func (suite *ConfigurationTestSuite) TestConfigLoadingSucceed() {
cfg := &Configuration{}
err := cfg.Load("../config_test.yml", false)
assert.Nil(suite.T(), err, "Load config from yaml file, expect nil error but got error '%s'", err)
}
// TestConfigLoadingWithEnv ...
func (suite *ConfigurationTestSuite) TestConfigLoadingWithEnv() {
err := setENV()
require.Nil(suite.T(), err, "set envs: expect nil error but got error '%s'", err)
defer func() {
err := unsetENV()
require.Nil(suite.T(), err, "unset envs: expect nil error but got error '%s'", err)
}()
cfg := &Configuration{}
err = cfg.Load("../config_test.yml", true)
require.Nil(suite.T(), err, "load config from yaml file, expect nil error but got error '%s'", err)
assert.Equal(suite.T(), "https", cfg.Protocol, "expect protocol 'https', but got '%s'", cfg.Protocol)
assert.Equal(suite.T(), uint(8989), cfg.Port, "expect port 8989 but got '%d'", cfg.Port)
assert.Equal(
suite.T(),
uint(8),
cfg.PoolConfig.WorkerCount,
"expect worker count 8 but go '%d'",
cfg.PoolConfig.WorkerCount,
)
assert.Equal(
suite.T(),
"redis://arbitrary_username:password@8.8.8.8:6379/0",
cfg.PoolConfig.RedisPoolCfg.RedisURL,
"expect redis URL 'localhost' but got '%s'",
cfg.PoolConfig.RedisPoolCfg.RedisURL,
)
assert.Equal(
suite.T(),
"ut_namespace",
cfg.PoolConfig.RedisPoolCfg.Namespace,
"expect redis namespace 'ut_namespace' but got '%s'",
cfg.PoolConfig.RedisPoolCfg.Namespace,
)
assert.Equal(suite.T(), "js_secret", GetAuthSecret(), "expect auth secret 'js_secret' but got '%s'", GetAuthSecret())
assert.Equal(suite.T(), "core_secret", GetUIAuthSecret(), "expect auth secret 'core_secret' but got '%s'", GetUIAuthSecret())
}
// TestDefaultConfig ...
func (suite *ConfigurationTestSuite) TestDefaultConfig() {
err := DefaultConfig.Load("../config_test.yml", true)
require.Nil(suite.T(), err, "load config from yaml file, expect nil error but got error '%s'", err)
redisURL := DefaultConfig.PoolConfig.RedisPoolCfg.RedisURL
if redisURL != "redis://localhost:6379" {
t.Errorf("expect redisURL '%s' but got '%s'\n", "redis://localhost:6379", redisURL)
}
assert.Equal(suite.T(), "redis://localhost:6379", redisURL, "expect redisURL '%s' but got '%s'", "redis://localhost:6379", redisURL)
if len(DefaultConfig.JobLoggerConfigs) == 0 {
t.Errorf("expect 2 job loggers configured but got %d", len(DefaultConfig.JobLoggerConfigs))
}
jLoggerCount := len(DefaultConfig.JobLoggerConfigs)
assert.Equal(suite.T(), 2, jLoggerCount, "expect 2 job loggers configured but got %d", jLoggerCount)
if len(DefaultConfig.LoggerConfigs) == 0 {
t.Errorf("expect 1 loggers configured but got %d", len(DefaultConfig.LoggerConfigs))
}
loggerCount := len(DefaultConfig.LoggerConfigs)
assert.Equal(suite.T(), 1, loggerCount, "expect 1 logger configured but got %d", loggerCount)
// Only verify the complicated one
theLogger := DefaultConfig.JobLoggerConfigs[1]
if theLogger.Name != "FILE" {
t.Fatalf("expect FILE logger but got %s", theLogger.Name)
}
if theLogger.Level != "INFO" {
t.Errorf("expect INFO log level of FILE logger but got %s", theLogger.Level)
}
if len(theLogger.Settings) == 0 {
t.Errorf("expect extra settings but got nothing")
}
if theLogger.Settings["base_dir"] != "/tmp/job_logs" {
t.Errorf("expect extra setting base_dir to be '/tmp/job_logs' but got %s", theLogger.Settings["base_dir"])
}
if theLogger.Sweeper == nil {
t.Fatalf("expect non nil sweeper of FILE logger but got nil")
}
if theLogger.Sweeper.Duration != 5 {
t.Errorf("expect sweep duration to be 5 but got %d", theLogger.Sweeper.Duration)
}
if theLogger.Sweeper.Settings["work_dir"] != "/tmp/job_logs" {
t.Errorf("expect work dir of sweeper of FILE logger to be '/tmp/job_logs' but got %s", theLogger.Sweeper.Settings["work_dir"])
}
assert.Equal(suite.T(), "FILE", theLogger.Name, "expect FILE logger but got %s", theLogger.Name)
assert.Equal(suite.T(), "INFO", theLogger.Level, "expect INFO log level of FILE logger but got %s", theLogger.Level)
assert.NotEqual(suite.T(), 0, len(theLogger.Settings), "expect extra settings but got nothing")
assert.Equal(
suite.T(),
"/tmp/job_logs",
theLogger.Settings["base_dir"],
"expect extra setting base_dir to be '/tmp/job_logs' but got %s",
theLogger.Settings["base_dir"],
)
assert.NotNil(suite.T(), theLogger.Sweeper, "expect non nil sweeper of FILE logger but got nil")
assert.Equal(suite.T(), 5, theLogger.Sweeper.Duration, "expect sweep duration to be 5 but got %d", theLogger.Sweeper.Duration)
assert.Equal(
suite.T(),
"/tmp/job_logs",
theLogger.Sweeper.Settings["work_dir"],
"expect work dir of sweeper of FILE logger to be '/tmp/job_logs' but got %s",
theLogger.Sweeper.Settings["work_dir"],
)
}
func setENV() {
os.Setenv("JOB_SERVICE_PROTOCOL", "https")
os.Setenv("JOB_SERVICE_PORT", "8989")
os.Setenv("JOB_SERVICE_HTTPS_CERT", "../server.crt")
os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key")
os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis")
os.Setenv("JOB_SERVICE_POOL_WORKERS", "8")
os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "8.8.8.8:6379,100,password,0")
os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
os.Setenv("JOBSERVICE_SECRET", "js_secret")
os.Setenv("CORE_SECRET", "core_secret")
func setENV() error {
err := os.Setenv("JOB_SERVICE_PROTOCOL", "https")
err = os.Setenv("JOB_SERVICE_PORT", "8989")
err = os.Setenv("JOB_SERVICE_HTTPS_CERT", "../server.crt")
err = os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key")
err = os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis")
err = os.Setenv("JOB_SERVICE_POOL_WORKERS", "8")
err = os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "8.8.8.8:6379,100,password,0")
err = os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
err = os.Setenv("JOBSERVICE_SECRET", "js_secret")
err = os.Setenv("CORE_SECRET", "core_secret")
return err
}
func unsetENV() {
os.Unsetenv("JOB_SERVICE_PROTOCOL")
os.Unsetenv("JOB_SERVICE_PORT")
os.Unsetenv("JOB_SERVICE_HTTPS_CERT")
os.Unsetenv("JOB_SERVICE_HTTPS_KEY")
os.Unsetenv("JOB_SERVICE_POOL_BACKEND")
os.Unsetenv("JOB_SERVICE_POOL_WORKERS")
os.Unsetenv("JOB_SERVICE_POOL_REDIS_URL")
os.Unsetenv("JOB_SERVICE_POOL_REDIS_NAMESPACE")
os.Unsetenv("JOBSERVICE_SECRET")
os.Unsetenv("CORE_SECRET")
func unsetENV() error {
err := os.Unsetenv("JOB_SERVICE_PROTOCOL")
err = os.Unsetenv("JOB_SERVICE_PORT")
err = os.Unsetenv("JOB_SERVICE_HTTPS_CERT")
err = os.Unsetenv("JOB_SERVICE_HTTPS_KEY")
err = os.Unsetenv("JOB_SERVICE_POOL_BACKEND")
err = os.Unsetenv("JOB_SERVICE_POOL_WORKERS")
err = os.Unsetenv("JOB_SERVICE_POOL_REDIS_URL")
err = os.Unsetenv("JOB_SERVICE_POOL_REDIS_NAMESPACE")
err = os.Unsetenv("JOBSERVICE_SECRET")
err = os.Unsetenv("CORE_SECRET")
return err
}
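Note that setENV/unsetENV above overwrite err on every call, so only the last failure is reported; a small alternative sketch (illustration only, the helper name is hypothetical) that surfaces the first failure:
// setTestEnv sets the same variables as setENV but returns on the first error.
func setTestEnv() error {
	envs := map[string]string{
		"JOB_SERVICE_PROTOCOL":             "https",
		"JOB_SERVICE_PORT":                 "8989",
		"JOB_SERVICE_HTTPS_CERT":           "../server.crt",
		"JOB_SERVICE_HTTPS_KEY":            "../server.key",
		"JOB_SERVICE_POOL_BACKEND":         "redis",
		"JOB_SERVICE_POOL_WORKERS":         "8",
		"JOB_SERVICE_POOL_REDIS_URL":       "8.8.8.8:6379,100,password,0",
		"JOB_SERVICE_POOL_REDIS_NAMESPACE": "ut_namespace",
		"JOBSERVICE_SECRET":                "js_secret",
		"CORE_SECRET":                      "core_secret",
	}
	for k, v := range envs {
		if err := os.Setenv(k, v); err != nil {
			return err
		}
	}
	return nil
}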

View File

@ -10,7 +10,7 @@ https_config:
#Server listening port
port: 9444
#Worker pool
#Worker worker
worker_pool:
#Worker concurrency
workers: 10
@ -20,7 +20,7 @@ worker_pool:
#redis://[arbitrary_username:password@]ipaddress:port/database_index
#or ipaddress:port[,weight,password,database_index]
redis_url: "localhost:6379"
namespace: "harbor_job_service"
namespace: "testing_job_service_v2"
#Loggers for the running job
job_loggers:
@ -39,7 +39,3 @@ job_loggers:
loggers:
- name: "STD_OUTPUT" # Same with above
level: "DEBUG"
#Admin server endpoint
admin_server: "http://127.0.0.1:8888"

View File

@ -15,129 +15,127 @@
package core
import (
"errors"
"fmt"
"github.com/pkg/errors"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/models"
"github.com/goharbor/harbor/src/jobservice/pool"
"github.com/goharbor/harbor/src/jobservice/utils"
"github.com/goharbor/harbor/src/jobservice/lcm"
"github.com/goharbor/harbor/src/jobservice/worker"
"github.com/robfig/cron"
)
const (
hookActivated = "activated"
hookDeactivated = "error"
)
// Controller implement the core interface and provides related job handle methods.
// Controller will coordinate the lower components to complete the process as a commander role.
type Controller struct {
// Refer the backend pool
backendPool pool.Interface
// basicController implements the core interface and provides the related job handling methods.
// basicController acts as a commander, coordinating the lower-level components to complete the process.
type basicController struct {
// Refer the backend worker
backendWorker worker.Interface
// Refer the job life cycle management controller
ctl lcm.Controller
}
// NewController is constructor of Controller.
func NewController(backendPool pool.Interface) *Controller {
return &Controller{
backendPool: backendPool,
// NewController is constructor of basicController.
func NewController(backendWorker worker.Interface, ctl lcm.Controller) Interface {
return &basicController{
backendWorker: backendWorker,
ctl: ctl,
}
}
// LaunchJob is implementation of same method in core interface.
func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
func (bc *basicController) LaunchJob(req *job.Request) (res *job.Stats, err error) {
if err := validJobReq(req); err != nil {
return models.JobStats{}, err
return nil, errs.BadRequestError(err)
}
// Validate job name
jobType, isKnownJob := c.backendPool.IsKnownJob(req.Job.Name)
jobType, isKnownJob := bc.backendWorker.IsKnownJob(req.Job.Name)
if !isKnownJob {
return models.JobStats{}, fmt.Errorf("job with name '%s' is unknown", req.Job.Name)
return nil, errs.BadRequestError(errors.Errorf("job with name '%s' is unknown", req.Job.Name))
}
// Validate parameters
if err := c.backendPool.ValidateJobParameters(jobType, req.Job.Parameters); err != nil {
return models.JobStats{}, err
if err := bc.backendWorker.ValidateJobParameters(jobType, req.Job.Parameters); err != nil {
return nil, errs.BadRequestError(err)
}
// Enqueue the job according to its kind
var (
res models.JobStats
err error
)
switch req.Job.Metadata.JobKind {
case job.JobKindScheduled:
res, err = c.backendPool.Schedule(
case job.KindScheduled:
res, err = bc.backendWorker.Schedule(
req.Job.Name,
req.Job.Parameters,
req.Job.Metadata.ScheduleDelay,
req.Job.Metadata.IsUnique)
case job.JobKindPeriodic:
res, err = c.backendPool.PeriodicallyEnqueue(
req.Job.Metadata.IsUnique,
req.Job.StatusHook,
)
case job.KindPeriodic:
res, err = bc.backendWorker.PeriodicallyEnqueue(
req.Job.Name,
req.Job.Parameters,
req.Job.Metadata.Cron)
req.Job.Metadata.Cron,
req.Job.Metadata.IsUnique,
req.Job.StatusHook,
)
default:
res, err = c.backendPool.Enqueue(req.Job.Name, req.Job.Parameters, req.Job.Metadata.IsUnique)
res, err = bc.backendWorker.Enqueue(
req.Job.Name,
req.Job.Parameters,
req.Job.Metadata.IsUnique,
req.Job.StatusHook,
)
}
// Register status hook?
// Save job stats
if err == nil {
if !utils.IsEmptyStr(req.Job.StatusHook) {
if err := c.backendPool.RegisterHook(res.Stats.JobID, req.Job.StatusHook); err != nil {
res.Stats.HookStatus = hookDeactivated
} else {
res.Stats.HookStatus = hookActivated
}
if _, err := bc.ctl.New(res); err != nil {
return nil, err
}
}
return res, err
return
}
// GetJob is implementation of same method in core interface.
func (c *Controller) GetJob(jobID string) (models.JobStats, error) {
func (bc *basicController) GetJob(jobID string) (*job.Stats, error) {
if utils.IsEmptyStr(jobID) {
return models.JobStats{}, errors.New("empty job ID")
return nil, errs.BadRequestError(errors.New("empty job ID"))
}
return c.backendPool.GetJobStats(jobID)
t, err := bc.ctl.Track(jobID)
if err != nil {
return nil, err
}
return t.Job(), nil
}
// StopJob is implementation of same method in core interface.
func (c *Controller) StopJob(jobID string) error {
func (bc *basicController) StopJob(jobID string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")
return errs.BadRequestError(errors.New("empty job ID"))
}
return c.backendPool.StopJob(jobID)
}
// CancelJob is implementation of same method in core interface.
func (c *Controller) CancelJob(jobID string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")
}
return c.backendPool.CancelJob(jobID)
return bc.backendWorker.StopJob(jobID)
}
// RetryJob is implementation of same method in core interface.
func (c *Controller) RetryJob(jobID string) error {
func (bc *basicController) RetryJob(jobID string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")
return errs.BadRequestError(errors.New("empty job ID"))
}
return c.backendPool.RetryJob(jobID)
return bc.backendWorker.RetryJob(jobID)
}
// GetJobLogData is used to return the log text data for the specified job if exists
func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
func (bc *basicController) GetJobLogData(jobID string) ([]byte, error) {
if utils.IsEmptyStr(jobID) {
return nil, errors.New("empty job ID")
return nil, errs.BadRequestError(errors.New("empty job ID"))
}
logData, err := logger.Retrieve(jobID)
@ -149,12 +147,46 @@ func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
}
// CheckStatus is implementation of same method in core interface.
func (c *Controller) CheckStatus() (models.JobPoolStats, error) {
return c.backendPool.Stats()
func (bc *basicController) CheckStatus() (*worker.Stats, error) {
return bc.backendWorker.Stats()
}
func validJobReq(req models.JobRequest) error {
if req.Job == nil {
// GetPeriodicExecutions gets the periodic executions for the specified periodic job
func (bc *basicController) GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error) {
if utils.IsEmptyStr(periodicJobID) {
return nil, 0, errs.BadRequestError(errors.New("empty periodic job ID"))
}
t, err := bc.ctl.Track(periodicJobID)
if err != nil {
return nil, 0, err
}
eIDs, total, err := t.Executions(query)
if err != nil {
return nil, 0, err
}
res := make([]*job.Stats, 0)
for _, eID := range eIDs {
et, err := bc.ctl.Track(eID)
if err != nil {
return nil, 0, err
}
res = append(res, et.Job())
}
return res, total, nil
}
// ScheduledJobs returns the scheduled jobs by page
func (bc *basicController) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
return bc.backendWorker.ScheduledJobs(query)
}
func validJobReq(req *job.Request) error {
if req == nil || req.Job == nil {
return errors.New("empty job request is not allowed")
}
@ -166,29 +198,29 @@ func validJobReq(req models.JobRequest) error {
return errors.New("metadata of job is missing")
}
if req.Job.Metadata.JobKind != job.JobKindGeneric &&
req.Job.Metadata.JobKind != job.JobKindPeriodic &&
req.Job.Metadata.JobKind != job.JobKindScheduled {
return fmt.Errorf(
if req.Job.Metadata.JobKind != job.KindGeneric &&
req.Job.Metadata.JobKind != job.KindPeriodic &&
req.Job.Metadata.JobKind != job.KindScheduled {
return errors.Errorf(
"job kind '%s' is not supported, only support '%s','%s','%s'",
req.Job.Metadata.JobKind,
job.JobKindGeneric,
job.JobKindScheduled,
job.JobKindPeriodic)
job.KindGeneric,
job.KindScheduled,
job.KindPeriodic)
}
if req.Job.Metadata.JobKind == job.JobKindScheduled &&
if req.Job.Metadata.JobKind == job.KindScheduled &&
req.Job.Metadata.ScheduleDelay == 0 {
return fmt.Errorf("'schedule_delay' must be specified if the job kind is '%s'", job.JobKindScheduled)
return errors.Errorf("'schedule_delay' must be specified for %s job", job.KindScheduled)
}
if req.Job.Metadata.JobKind == job.JobKindPeriodic {
if req.Job.Metadata.JobKind == job.KindPeriodic {
if utils.IsEmptyStr(req.Job.Metadata.Cron) {
return fmt.Errorf("'cron_spec' must be specified if the job kind is '%s'", job.JobKindPeriodic)
return fmt.Errorf("'cron_spec' must be specified for the %s job", job.KindPeriodic)
}
if _, err := cron.Parse(req.Job.Metadata.Cron); err != nil {
return fmt.Errorf("'cron_spec' is not correctly set: %s", err)
return fmt.Errorf("'cron_spec' is not correctly set: %s: %s", req.Job.Metadata.Cron, err)
}
}
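A rough wiring sketch (not taken from this change; the helper name is hypothetical and the bootstrap of the worker and the life cycle controller is assumed to happen elsewhere) showing how the rewritten controller is intended to be used:
import (
	"github.com/goharbor/harbor/src/jobservice/core"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/lcm"
	"github.com/goharbor/harbor/src/jobservice/worker"
)

// launchSamplePeriodicJob is a hypothetical helper; backendWorker and lcmCtl
// are assumed to be created during service bootstrap.
func launchSamplePeriodicJob(backendWorker worker.Interface, lcmCtl lcm.Controller) (*job.Stats, error) {
	ctl := core.NewController(backendWorker, lcmCtl)

	// A periodic request must carry a valid cron spec, as enforced by validJobReq.
	req := &job.Request{
		Job: &job.RequestBody{
			Name:       job.SampleJob,
			Parameters: job.Parameters{"name": "demo:v1"},
			StatusHook: "http://localhost:9090",
			Metadata: &job.Metadata{
				JobKind:  job.KindPeriodic,
				Cron:     "0 0 * * * *",
				IsUnique: true,
			},
		},
	}

	// Invalid input (unknown job name, bad cron, ...) comes back wrapped as a bad request error.
	return ctl.LaunchJob(req)
}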

View File

@ -14,312 +14,404 @@
package core
import (
"errors"
"context"
"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl/sample"
"github.com/goharbor/harbor/src/jobservice/tests"
"github.com/goharbor/harbor/src/jobservice/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"testing"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/models"
"time"
)
func TestLaunchGenericJob(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
req := createJobReq("Generic", false, false)
res, err := c.LaunchJob(req)
if err != nil {
t.Fatal(err)
}
// ControllerTestSuite tests functions of core controller
type ControllerTestSuite struct {
suite.Suite
if res.Stats.JobID != "fake_ID" {
t.Fatalf("expect enqueued job ID 'fake_ID' but got '%s'\n", res.Stats.JobID)
lcmCtl *fakeLcmController
worker *fakeWorker
ctl Interface
res *job.Stats
jobID string
params job.Parameters
}
// SetupSuite prepares test suite
func (suite *ControllerTestSuite) SetupSuite() {
suite.ctl = NewController(suite, suite)
suite.params = make(job.Parameters)
suite.params["name"] = "testing:v1"
suite.jobID = utils.MakeIdentifier()
suite.res = &job.Stats{
Info: &job.StatsInfo{
JobID: suite.jobID,
},
}
}
func TestLaunchGenericJobUnique(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
req := createJobReq("Generic", true, false)
res, err := c.LaunchJob(req)
if err != nil {
t.Fatal(err)
}
// Prepare for each test case
func (suite *ControllerTestSuite) SetupTest() {
suite.worker = &fakeWorker{}
suite.lcmCtl = &fakeLcmController{}
if res.Stats.JobID != "fake_ID" {
t.Fatalf("expect enqueued job ID 'fake_ID' but got '%s'\n", res.Stats.JobID)
}
suite.lcmCtl.On("Track", suite.jobID).Return(job.NewBasicTrackerWithStats(nil, suite.res, "ns", nil, nil), nil)
suite.lcmCtl.On("New", suite.res).Return(job.NewBasicTrackerWithStats(nil, suite.res, "ns", nil, nil), nil)
suite.worker.On("IsKnownJob", job.SampleJob).Return((*sample.Job)(nil), true)
suite.worker.On("IsKnownJob", "fake").Return(nil, false)
suite.worker.On("ValidateJobParameters", (*sample.Job)(nil), suite.params).Return(nil)
}
func TestLaunchGenericJobWithHook(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
req := createJobReq("Generic", false, true)
res, err := c.LaunchJob(req)
if err != nil {
t.Fatal(err)
}
if res.Stats.JobID != "fake_ID" {
t.Fatalf("expect enqueued job ID 'fake_ID' but got '%s'\n", res.Stats.JobID)
}
// TestControllerTestSuite is suite entry for 'go test'
func TestControllerTestSuite(t *testing.T) {
suite.Run(t, new(ControllerTestSuite))
}
func TestLaunchScheduledJob(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
req := createJobReq("Scheduled", false, true)
res, err := c.LaunchJob(req)
if err != nil {
t.Fatal(err)
}
// TestLaunchGenericJob ...
func (suite *ControllerTestSuite) TestLaunchGenericJob() {
req := createJobReq("Generic")
if res.Stats.JobID != "fake_ID_Scheduled" {
t.Fatalf("expect enqueued job ID 'fake_ID_Scheduled' but got '%s'\n", res.Stats.JobID)
}
suite.worker.On("Enqueue", job.SampleJob, suite.params, true, req.Job.StatusHook).Return(suite.res, nil)
res, err := suite.ctl.LaunchJob(req)
require.Nil(suite.T(), err, "launch job: nil error expected but got %s", err)
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
}
func TestLaunchScheduledUniqueJob(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
req := createJobReq("Scheduled", true, false)
res, err := c.LaunchJob(req)
if err != nil {
t.Fatal(err)
}
// TestLaunchScheduledJob ...
func (suite *ControllerTestSuite) TestLaunchScheduledJob() {
req := createJobReq("Scheduled")
if res.Stats.JobID != "fake_ID_Scheduled" {
t.Fatalf("expect enqueued job ID 'fake_ID_Scheduled' but got '%s'\n", res.Stats.JobID)
}
suite.worker.On("Schedule", job.SampleJob, suite.params, uint64(100), true, req.Job.StatusHook).Return(suite.res, nil)
res, err := suite.ctl.LaunchJob(req)
require.Nil(suite.T(), err, "launch scheduled job: nil error expected but got %s", err)
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
}
func TestLaunchPeriodicJob(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
req := createJobReq("Periodic", true, false)
res, err := c.LaunchJob(req)
if err != nil {
t.Fatal(err)
}
// TestLaunchPeriodicJob ...
func (suite *ControllerTestSuite) TestLaunchPeriodicJob() {
req := createJobReq("Periodic")
if res.Stats.JobID != "fake_ID_Periodic" {
t.Fatalf("expect enqueued job ID 'fake_ID_Periodic' but got '%s'\n", res.Stats.JobID)
}
suite.worker.On("PeriodicallyEnqueue", job.SampleJob, suite.params, "5 * * * * *", true, req.Job.StatusHook).Return(suite.res, nil)
res, err := suite.ctl.LaunchJob(req)
require.Nil(suite.T(), err, "launch periodic job: nil error expected but got %s", err)
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
}
func TestGetJobStats(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
stats, err := c.GetJob("fake_ID")
if err != nil {
t.Fatal(err)
}
if stats.Stats.Status != "running" {
t.Fatalf("expect stauts 'running' but got '%s'\n", stats.Stats.Status)
}
// TestGetJobStats ...
func (suite *ControllerTestSuite) TestGetJobStats() {
res, err := suite.ctl.GetJob(suite.jobID)
require.Nil(suite.T(), err, "get job stats: nil error expected but got %s", err)
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
}
func TestJobActions(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
// TestJobActions ...
func (suite *ControllerTestSuite) TestJobActions() {
suite.worker.On("StopJob", suite.jobID).Return(nil)
suite.worker.On("RetryJob", suite.jobID).Return(nil)
if err := c.StopJob("fake_ID"); err != nil {
t.Fatal(err)
}
err := suite.ctl.StopJob(suite.jobID)
err = suite.ctl.RetryJob(suite.jobID)
if err := c.CancelJob("fake_ID"); err != nil {
t.Fatal(err)
}
if err := c.RetryJob("fake_ID"); err != nil {
t.Fatal(err)
}
assert.Nil(suite.T(), err, "job action: nil error expected but got %s", err)
}
func TestGetJobLogData(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
if _, err := c.GetJobLogData("fake_ID"); err == nil {
t.Fatal("expect error but got nil")
}
}
func TestCheckStatus(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
st, err := c.CheckStatus()
if err != nil {
t.Fatal(err)
}
if len(st.Pools) == 0 {
t.Fatal("expect status data but got zero list")
}
if st.Pools[0].Status != "running" {
t.Fatalf("expect status 'running' but got '%s'\n", st.Pools[0].Status)
}
}
func TestInvalidCheck(t *testing.T) {
pool := &fakePool{}
c := NewController(pool)
req := models.JobRequest{
Job: &models.JobData{
Name: "DEMO",
Metadata: &models.JobMetadata{
JobKind: "kind",
// TestCheckStatus ...
func (suite *ControllerTestSuite) TestCheckStatus() {
suite.worker.On("Stats").Return(&worker.Stats{
Pools: []*worker.StatsData{
{
Status: "running",
},
},
}, nil)
st, err := suite.ctl.CheckStatus()
require.Nil(suite.T(), err, "check worker status: nil error expected but got %s", err)
assert.Equal(suite.T(), 1, len(st.Pools), "expected 1 pool status but got 0")
assert.Equal(suite.T(), "running", st.Pools[0].Status, "expected running pool but got %s", st.Pools[0].Status)
}
// TestScheduledJobs ...
func (suite *ControllerTestSuite) TestScheduledJobs() {
q := &query.Parameter{
PageSize: 20,
PageNumber: 1,
}
suite.worker.On("ScheduledJobs", q).Return([]*job.Stats{suite.res}, 1, nil)
_, total, err := suite.ctl.ScheduledJobs(q)
require.Nil(suite.T(), err, "scheduled jobs: nil error expected but got %s", err)
assert.Equal(suite.T(), int64(1), total, "expected 1 item but got 0")
}
// TestInvalidChecks ...
func (suite *ControllerTestSuite) TestInvalidChecks() {
req := createJobReq("kind")
_, err := suite.ctl.LaunchJob(req)
assert.NotNil(suite.T(), err, "invalid job kind: error expected but got nil")
req.Job.Metadata.JobKind = job.KindGeneric
req.Job.Name = "fake"
_, err = suite.ctl.LaunchJob(req)
assert.NotNil(suite.T(), err, "invalid job name: error expected but got nil")
req.Job.Metadata.JobKind = job.KindScheduled
req.Job.Name = job.SampleJob
req.Job.Metadata.ScheduleDelay = 0
_, err = suite.ctl.LaunchJob(req)
assert.NotNil(suite.T(), err, "invalid scheduled job: error expected but got nil")
req.Job.Metadata.JobKind = job.KindPeriodic
req.Job.Metadata.Cron = "x x x x x x"
_, err = suite.ctl.LaunchJob(req)
assert.NotNil(suite.T(), err, "invalid job name: error expected but got nil")
}
// TestGetPeriodicExecutions tests GetPeriodicExecutions
func (suite *ControllerTestSuite) TestGetPeriodicExecutions() {
pool := tests.GiveMeRedisPool()
namespace := tests.GiveMeTestNamespace()
jobID := utils.MakeIdentifier()
nID := time.Now().Unix()
mockJobStats := &job.Stats{
Info: &job.StatsInfo{
JobID: jobID,
Status: job.ScheduledStatus.String(),
JobKind: job.KindPeriodic,
JobName: job.SampleJob,
IsUnique: false,
CronSpec: "0 0 * * * *",
NumericPID: nID,
},
}
if _, err := c.LaunchJob(req); err == nil {
t.Fatal("error expected but got nil")
t := job.NewBasicTrackerWithStats(context.TODO(), mockJobStats, namespace, pool, nil)
err := t.Save()
require.NoError(suite.T(), err)
executionID := utils.MakeIdentifier()
runAt := time.Now().Add(1 * time.Hour).Unix()
executionStats := &job.Stats{
Info: &job.StatsInfo{
JobID: executionID,
Status: job.ScheduledStatus.String(),
JobKind: job.KindScheduled,
JobName: job.SampleJob,
IsUnique: false,
CronSpec: "0 0 * * * *",
RunAt: runAt,
EnqueueTime: runAt,
UpstreamJobID: jobID,
},
}
req.Job.Name = "fake"
if _, err := c.LaunchJob(req); err == nil {
t.Fatal("error expected but got nil")
}
t2 := job.NewBasicTrackerWithStats(context.TODO(), executionStats, namespace, pool, nil)
err = t2.Save()
require.NoError(suite.T(), err)
req.Job.Metadata.JobKind = "Scheduled"
if _, err := c.LaunchJob(req); err == nil {
t.Fatal("error expected but got nil")
}
suite.lcmCtl.On("Track", jobID).Return(t, nil)
suite.lcmCtl.On("Track", executionID).Return(t2, nil)
req.Job.Metadata.JobKind = "Periodic"
req.Job.Metadata.Cron = "x x x x x x"
if _, err := c.LaunchJob(req); err == nil {
t.Fatal("error expected but got nil")
}
_, total, err := suite.ctl.GetPeriodicExecutions(jobID, &query.Parameter{
PageSize: 10,
PageNumber: 1,
Extras: make(query.ExtraParameters),
})
require.NoError(suite.T(), err)
assert.Equal(suite.T(), int64(1), total)
}
func createJobReq(kind string, isUnique bool, withHook bool) models.JobRequest {
params := make(map[string]interface{})
params["name"] = "testing"
req := models.JobRequest{
Job: &models.JobData{
Name: "DEMO",
func createJobReq(kind string) *job.Request {
params := make(job.Parameters)
params["name"] = "testing:v1"
return &job.Request{
Job: &job.RequestBody{
Name: job.SampleJob,
Parameters: params,
Metadata: &models.JobMetadata{
StatusHook: "http://localhost:9090",
Metadata: &job.Metadata{
JobKind: kind,
IsUnique: isUnique,
IsUnique: true,
ScheduleDelay: 100,
Cron: "5 * * * * *",
},
},
}
}
if withHook {
req.Job.StatusHook = "http://localhost:9090"
// Implement lcm controller interface
func (suite *ControllerTestSuite) Serve() error {
return suite.lcmCtl.Serve()
}
func (suite *ControllerTestSuite) New(stats *job.Stats) (job.Tracker, error) {
return suite.lcmCtl.New(stats)
}
func (suite *ControllerTestSuite) Track(jobID string) (job.Tracker, error) {
return suite.lcmCtl.Track(jobID)
}
// Implement worker interface
func (suite *ControllerTestSuite) Start() error {
return suite.worker.Start()
}
func (suite *ControllerTestSuite) RegisterJobs(jobs map[string]interface{}) error {
return suite.worker.RegisterJobs(jobs)
}
func (suite *ControllerTestSuite) Enqueue(jobName string, params job.Parameters, isUnique bool, webHook string) (*job.Stats, error) {
return suite.worker.Enqueue(jobName, params, isUnique, webHook)
}
func (suite *ControllerTestSuite) Schedule(jobName string, params job.Parameters, runAfterSeconds uint64, isUnique bool, webHook string) (*job.Stats, error) {
return suite.worker.Schedule(jobName, params, runAfterSeconds, isUnique, webHook)
}
func (suite *ControllerTestSuite) PeriodicallyEnqueue(jobName string, params job.Parameters, cronSetting string, isUnique bool, webHook string) (*job.Stats, error) {
return suite.worker.PeriodicallyEnqueue(jobName, params, cronSetting, isUnique, webHook)
}
func (suite *ControllerTestSuite) Stats() (*worker.Stats, error) {
return suite.worker.Stats()
}
func (suite *ControllerTestSuite) IsKnownJob(name string) (interface{}, bool) {
return suite.worker.IsKnownJob(name)
}
func (suite *ControllerTestSuite) ValidateJobParameters(jobType interface{}, params job.Parameters) error {
return suite.worker.ValidateJobParameters(jobType, params)
}
func (suite *ControllerTestSuite) StopJob(jobID string) error {
return suite.worker.StopJob(jobID)
}
func (suite *ControllerTestSuite) RetryJob(jobID string) error {
return suite.worker.RetryJob(jobID)
}
func (suite *ControllerTestSuite) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
return suite.worker.ScheduledJobs(query)
}
// Implement fake objects with mock
type fakeLcmController struct {
mock.Mock
}
func (flc *fakeLcmController) Serve() error {
return flc.Called().Error(0)
}
func (flc *fakeLcmController) New(stats *job.Stats) (job.Tracker, error) {
args := flc.Called(stats)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return req
return args.Get(0).(job.Tracker), nil
}
type fakePool struct{}
func (f *fakePool) Start() error {
return nil
}
func (f *fakePool) RegisterJob(name string, job interface{}) error {
return nil
}
func (f *fakePool) RegisterJobs(jobs map[string]interface{}) error {
return nil
}
func (f *fakePool) Enqueue(jobName string, params models.Parameters, isUnique bool) (models.JobStats, error) {
return models.JobStats{
Stats: &models.JobStatData{
JobID: "fake_ID",
},
}, nil
}
func (f *fakePool) Schedule(jobName string, params models.Parameters, runAfterSeconds uint64, isUnique bool) (models.JobStats, error) {
return models.JobStats{
Stats: &models.JobStatData{
JobID: "fake_ID_Scheduled",
},
}, nil
}
func (f *fakePool) PeriodicallyEnqueue(jobName string, params models.Parameters, cronSetting string) (models.JobStats, error) {
return models.JobStats{
Stats: &models.JobStatData{
JobID: "fake_ID_Periodic",
},
}, nil
}
func (f *fakePool) Stats() (models.JobPoolStats, error) {
return models.JobPoolStats{
Pools: []*models.JobPoolStatsData{
{
Status: "running",
},
},
}, nil
}
func (f *fakePool) IsKnownJob(name string) (interface{}, bool) {
return (*fakeJob)(nil), true
}
func (f *fakePool) ValidateJobParameters(jobType interface{}, params map[string]interface{}) error {
return nil
}
func (f *fakePool) GetJobStats(jobID string) (models.JobStats, error) {
return models.JobStats{
Stats: &models.JobStatData{
JobID: "fake_ID",
Status: "running",
},
}, nil
}
func (f *fakePool) StopJob(jobID string) error {
return nil
}
func (f *fakePool) CancelJob(jobID string) error {
return nil
}
func (f *fakePool) RetryJob(jobID string) error {
return nil
}
func (f *fakePool) RegisterHook(jobID string, hookURL string) error {
return nil
}
type fakeJob struct{}
func (j *fakeJob) MaxFails() uint {
return 3
}
func (j *fakeJob) ShouldRetry() bool {
return true
}
func (j *fakeJob) Validate(params map[string]interface{}) error {
if p, ok := params["name"]; ok {
if p == "testing" {
return nil
}
func (flc *fakeLcmController) Track(jobID string) (job.Tracker, error) {
args := flc.Called(jobID)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return errors.New("testing error")
return args.Get(0).(job.Tracker), nil
}
func (j *fakeJob) Run(ctx env.JobContext, params map[string]interface{}) error {
return nil
type fakeWorker struct {
mock.Mock
}
func (f *fakeWorker) Start() error {
return f.Called().Error(0)
}
func (f *fakeWorker) RegisterJobs(jobs map[string]interface{}) error {
return f.Called(jobs).Error(0)
}
func (f *fakeWorker) Enqueue(jobName string, params job.Parameters, isUnique bool, webHook string) (*job.Stats, error) {
args := f.Called(jobName, params, isUnique, webHook)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).(*job.Stats), nil
}
func (f *fakeWorker) Schedule(jobName string, params job.Parameters, runAfterSeconds uint64, isUnique bool, webHook string) (*job.Stats, error) {
args := f.Called(jobName, params, runAfterSeconds, isUnique, webHook)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).(*job.Stats), nil
}
func (f *fakeWorker) PeriodicallyEnqueue(jobName string, params job.Parameters, cronSetting string, isUnique bool, webHook string) (*job.Stats, error) {
args := f.Called(jobName, params, cronSetting, isUnique, webHook)
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).(*job.Stats), nil
}
func (f *fakeWorker) Stats() (*worker.Stats, error) {
args := f.Called()
if args.Error(1) != nil {
return nil, args.Error(1)
}
return args.Get(0).(*worker.Stats), nil
}
func (f *fakeWorker) IsKnownJob(name string) (interface{}, bool) {
args := f.Called(name)
if !args.Bool(1) {
return nil, args.Bool(1)
}
return args.Get(0), args.Bool(1)
}
func (f *fakeWorker) ValidateJobParameters(jobType interface{}, params job.Parameters) error {
return f.Called(jobType, params).Error(0)
}
func (f *fakeWorker) StopJob(jobID string) error {
return f.Called(jobID).Error(0)
}
func (f *fakeWorker) RetryJob(jobID string) error {
return f.Called(jobID).Error(0)
}
func (f *fakeWorker) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
args := f.Called(query)
if args.Error(2) != nil {
return nil, 0, args.Error(2)
}
return args.Get(0).([]*job.Stats), int64(args.Int(1)), nil
}

View File

@ -16,28 +16,30 @@
package core
import (
"github.com/goharbor/harbor/src/jobservice/models"
"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/worker"
)
// Interface defines the related main methods of job operation.
type Interface interface {
// LaunchJob is used to handle the job submission request.
//
// req JobRequest : Job request contains related required information of queuing job.
// req *job.Request : Job request contains related required information of queuing job.
//
// Returns:
// JobStats: Job status info with ID and self link returned if job is successfully launched.
// error : Error returned if failed to launch the specified job.
LaunchJob(req models.JobRequest) (models.JobStats, error)
// job.Stats : Job status info with ID and self link returned if job is successfully launched.
// error : Error returned if failed to launch the specified job.
LaunchJob(req *job.Request) (*job.Stats, error)
// GetJob is used to handle the job stats query request.
//
// jobID string: ID of job.
//
// Returns:
// JobStats: Job status info if job exists.
// error : Error returned if failed to get the specified job.
GetJob(jobID string) (models.JobStats, error)
// *job.Stats : Job status info if job exists.
// error : Error returned if failed to get the specified job.
GetJob(jobID string) (*job.Stats, error)
// StopJob is used to handle the job stopping request.
//
@ -55,17 +57,19 @@ type Interface interface {
// error : Error returned if failed to retry the specified job.
RetryJob(jobID string) error
// Cancel the job
//
// jobID string : ID of the enqueued job
//
// Returns:
// error : error returned if meet any problems
CancelJob(jobID string) error
// CheckStatus is used to handle the job service healthy status checking request.
CheckStatus() (models.JobPoolStats, error)
CheckStatus() (*worker.Stats, error)
// GetJobLogData is used to return the log text data for the specified job if exists
GetJobLogData(jobID string) ([]byte, error)
// Get the periodic executions for the specified periodic job.
// Pagination by query is supported.
// The total number is also returned.
GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error)
// Get the scheduled jobs by page
// The page number in the query will be ignored, default 20 is used. This is the limitation of backend lib.
// The total number is also returned.
ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error)
}
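A short sketch (the helper name is hypothetical; ctl is assumed to be any implementation of this Interface) of the new paginated queries:
import (
	"fmt"

	"github.com/goharbor/harbor/src/jobservice/common/query"
	"github.com/goharbor/harbor/src/jobservice/core"
)

// listJobs is a hypothetical helper showing the pagination-style calls.
func listJobs(ctl core.Interface, periodicJobID string) error {
	q := &query.Parameter{
		PageNumber: 1,
		PageSize:   20,
		Extras:     make(query.ExtraParameters),
	}

	scheduled, total, err := ctl.ScheduledJobs(q)
	if err != nil {
		return err
	}
	fmt.Printf("got %d of %d scheduled jobs\n", len(scheduled), total)

	// Executions of a periodic job are queried the same way.
	executions, total, err := ctl.GetPeriodicExecutions(periodicJobID, q)
	if err != nil {
		return err
	}
	fmt.Printf("got %d of %d executions\n", len(executions), total)
	return nil
}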

View File

@ -16,6 +16,7 @@ package env
import (
"context"
"github.com/goharbor/harbor/src/jobservice/job"
"sync"
)
@ -33,6 +34,6 @@ type Context struct {
ErrorChan chan error
// The base job context reference
// It will be the parent conetext of job execution context
JobContext JobContext
// It will be the parent context of job execution context
JobContext job.Context
}

View File

@ -21,26 +21,20 @@ import (
)
const (
// JobStoppedErrorCode is code for jobStoppedError
JobStoppedErrorCode = 10000 + iota
// JobCancelledErrorCode is code for jobCancelledError
JobCancelledErrorCode
// ReadRequestBodyErrorCode is code for the error of reading http request body error
ReadRequestBodyErrorCode
ReadRequestBodyErrorCode = 10000 + iota
// HandleJSONDataErrorCode is code for the error of handling json data error
HandleJSONDataErrorCode
// MissingBackendHandlerErrorCode is code for the error of missing backend controller
MissingBackendHandlerErrorCode
// LaunchJobErrorCode is code for the error of launching job
LaunchJobErrorCode
// CheckStatsErrorCode is code for the error of checking stats of worker pool
// CheckStatsErrorCode is code for the error of checking stats of the worker pool
CheckStatsErrorCode
// GetJobStatsErrorCode is code for the error of getting stats of enqueued job
GetJobStatsErrorCode
// StopJobErrorCode is code for the error of stopping job
StopJobErrorCode
// CancelJobErrorCode is code for the error of cancelling job
CancelJobErrorCode
// RetryJobErrorCode is code for the error of retrying job
RetryJobErrorCode
// UnknownActionNameErrorCode is code for the case of unknown action name
@ -53,6 +47,14 @@ const (
UnAuthorizedErrorCode
// ResourceConflictsErrorCode is code for the error of resource conflicting
ResourceConflictsErrorCode
// BadRequestErrorCode is code for the error of bad request
BadRequestErrorCode
// GetScheduledJobsErrorCode is code for the error of getting scheduled jobs
GetScheduledJobsErrorCode
// GetPeriodicExecutionErrorCode is code for the error of getting periodic executions
GetPeriodicExecutionErrorCode
// StatusMismatchErrorCode is code for the error of mismatching status
StatusMismatchErrorCode
)
// baseError ...
@ -82,92 +84,67 @@ func New(code uint16, err string, description string) error {
// ReadRequestBodyError is error wrapper for the error of reading request body.
func ReadRequestBodyError(err error) error {
return New(ReadRequestBodyErrorCode, "Read request body failed with error", err.Error())
return New(ReadRequestBodyErrorCode, "read request body failed with error", err.Error())
}
// HandleJSONDataError is error wrapper for the error of handling json data.
func HandleJSONDataError(err error) error {
return New(HandleJSONDataErrorCode, "Handle json data failed with error", err.Error())
return New(HandleJSONDataErrorCode, "handle json data failed with error", err.Error())
}
// MissingBackendHandlerError is error wrapper for the error of missing backend controller.
func MissingBackendHandlerError(err error) error {
return New(MissingBackendHandlerErrorCode, "Missing backend controller to handle the requests", err.Error())
return New(MissingBackendHandlerErrorCode, "missing backend controller to handle the requests", err.Error())
}
// LaunchJobError is error wrapper for the error of launching job failed.
func LaunchJobError(err error) error {
return New(LaunchJobErrorCode, "Launch job failed with error", err.Error())
return New(LaunchJobErrorCode, "launch job failed with error", err.Error())
}
// CheckStatsError is error wrapper for the error of checking stats failed
func CheckStatsError(err error) error {
return New(CheckStatsErrorCode, "Check stats of server failed with error", err.Error())
return New(CheckStatsErrorCode, "check stats of server failed with error", err.Error())
}
// GetJobStatsError is error wrapper for the error of getting job stats
func GetJobStatsError(err error) error {
return New(GetJobStatsErrorCode, "Get job stats failed with error", err.Error())
return New(GetJobStatsErrorCode, "get job stats failed with error", err.Error())
}
// StopJobError is error for the case of stopping job failed
func StopJobError(err error) error {
return New(StopJobErrorCode, "Stop job failed with error", err.Error())
}
// CancelJobError is error for the case of cancelling job failed
func CancelJobError(err error) error {
return New(CancelJobErrorCode, "Cancel job failed with error", err.Error())
return New(StopJobErrorCode, "stop job failed with error", err.Error())
}
// RetryJobError is error for the case of retrying job failed
func RetryJobError(err error) error {
return New(RetryJobErrorCode, "Retry job failed with error", err.Error())
return New(RetryJobErrorCode, "retry job failed with error", err.Error())
}
// UnknownActionNameError is error for the case of getting unknown job action
func UnknownActionNameError(err error) error {
return New(UnknownActionNameErrorCode, "Unknown job action name", err.Error())
return New(UnknownActionNameErrorCode, "unknown job action name", err.Error())
}
// GetJobLogError is error for the case of getting job log failed
func GetJobLogError(err error) error {
return New(GetJobLogErrorCode, "Failed to get the job log", err.Error())
return New(GetJobLogErrorCode, "failed to get the job log", err.Error())
}
// UnauthorizedError is error for the case of unauthorized accessing
func UnauthorizedError(err error) error {
return New(UnAuthorizedErrorCode, "Unauthorized", err.Error())
return New(UnAuthorizedErrorCode, "unauthorized", err.Error())
}
// jobStoppedError is designed for the case of stopping job.
type jobStoppedError struct {
baseError
// GetScheduledJobsError is error for the case of getting scheduled jobs failed
func GetScheduledJobsError(err error) error {
return New(GetScheduledJobsErrorCode, "failed to get scheduled jobs", err.Error())
}
// JobStoppedError is error wrapper for the case of stopping job.
func JobStoppedError() error {
return jobStoppedError{
baseError{
Code: JobStoppedErrorCode,
Err: "Job is stopped",
},
}
}
// jobCancelledError is designed for the case of cancelling job.
type jobCancelledError struct {
baseError
}
// JobCancelledError is error wrapper for the case of cancelling job.
func JobCancelledError() error {
return jobCancelledError{
baseError{
Code: JobStoppedErrorCode,
Err: "Job is cancelled",
},
}
// GetPeriodicExecutionError is error for the case of getting periodic jobs failed
func GetPeriodicExecutionError(err error) error {
return New(GetPeriodicExecutionErrorCode, "failed to get periodic executions", err.Error())
}
// objectNotFound is designed for the case of no object found
@ -202,26 +179,70 @@ func ConflictError(object string) error {
}
}
// IsJobStoppedError return true if the error is jobStoppedError
func IsJobStoppedError(err error) bool {
_, ok := err.(jobStoppedError)
return ok
// badRequestError is designed for the case of bad request
type badRequestError struct {
baseError
}
// IsJobCancelledError return true if the error is jobCancelledError
func IsJobCancelledError(err error) bool {
_, ok := err.(jobCancelledError)
return ok
// BadRequestError returns the error of handing bad request case
func BadRequestError(object interface{}) error {
return badRequestError{
baseError{
Code: BadRequestErrorCode,
Err: "bad request",
Description: fmt.Sprintf("%s", object),
},
}
}
// statusMismatchError is designed for the case of job status update mismatching
type statusMismatchError struct {
baseError
}
// StatusMismatchError returns the error of job status mismatching
func StatusMismatchError(current, target string) error {
return statusMismatchError{
baseError{
Code: StatusMismatchErrorCode,
Err: "mismatch job status",
Description: fmt.Sprintf("current %s, setting to %s", current, target),
},
}
}
// IsObjectNotFoundError return true if the error is objectNotFoundError
func IsObjectNotFoundError(err error) bool {
if err == nil {
return false
}
_, ok := err.(objectNotFoundError)
return ok
}
// IsConflictError returns true if the error is conflictError
func IsConflictError(err error) bool {
if err == nil {
return false
}
_, ok := err.(conflictError)
return ok
}
// IsBadRequestError returns true if the error is badRequestError
func IsBadRequestError(err error) bool {
if err == nil {
return false
}
_, ok := err.(badRequestError)
return ok
}
// IsStatusMismatchError returns true if the error is statusMismatchError
func IsStatusMismatchError(err error) bool {
if err == nil {
return false
}
_, ok := err.(statusMismatchError)
return ok
}
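For illustration (the mapping itself is an assumption, not part of this change), how an API layer could translate these typed errors using only the predicates defined above:
import (
	"net/http"

	"github.com/goharbor/harbor/src/jobservice/errs"
)

// statusCodeOf is a hypothetical mapping from jobservice errors to HTTP status codes.
func statusCodeOf(err error) int {
	switch {
	case err == nil:
		return http.StatusOK
	case errs.IsBadRequestError(err):
		return http.StatusBadRequest
	case errs.IsObjectNotFoundError(err):
		return http.StatusNotFound
	case errs.IsConflictError(err), errs.IsStatusMismatchError(err):
		return http.StatusConflict
	default:
		return http.StatusInternalServerError
	}
}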

View File

@ -0,0 +1,343 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook
import (
"context"
"encoding/json"
"math/rand"
"net/url"
"time"
"github.com/pkg/errors"
"github.com/goharbor/harbor/src/jobservice/job"
"sync"
"github.com/goharbor/harbor/src/jobservice/common/rds"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/lcm"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/gomodule/redigo/redis"
)
const (
// Influenced by the worker number setting
maxEventChanBuffer = 1024
// Max concurrent client handlers
maxHandlers = 5
// The max time for expiring the retrying events
// 180 days
maxEventExpireTime = 3600 * 24 * 180
// Wait a short while if any errors occurred
shortLoopInterval = 5 * time.Second
// Wait a longer while if no retrying elements are found
longLoopInterval = 5 * time.Minute
)
// Agent is designed to handle the hook events with reasonable numbers of concurrent threads
type Agent interface {
// Trigger hooks
Trigger(evt *Event) error
// Serves events now
Serve() error
// Attach a job life cycle controller
Attach(ctl lcm.Controller)
}
// Event contains the hook URL and the data
type Event struct {
URL string `json:"url"`
Message string `json:"message"` // meaningful text for event
Data *job.StatusChange `json:"data"` // generic data
Timestamp int64 `json:"timestamp"` // Use as time threshold of discarding the event (unit: second)
}
// Validate event
func (e *Event) Validate() error {
_, err := url.Parse(e.URL)
if err != nil {
return err
}
if e.Data == nil {
return errors.New("nil hook data")
}
return nil
}
// Serialize event to bytes
func (e *Event) Serialize() ([]byte, error) {
return json.Marshal(e)
}
// Deserialize the bytes to event
func (e *Event) Deserialize(bytes []byte) error {
return json.Unmarshal(bytes, e)
}
// basicAgent is a basic implementation of the Agent interface
type basicAgent struct {
context context.Context
namespace string
client Client
ctl lcm.Controller
events chan *Event
tokens chan bool
redisPool *redis.Pool
wg *sync.WaitGroup
}
// NewAgent is constructor of basic agent
func NewAgent(ctx *env.Context, ns string, redisPool *redis.Pool) Agent {
tks := make(chan bool, maxHandlers)
// Put tokens
for i := 0; i < maxHandlers; i++ {
tks <- true
}
return &basicAgent{
context: ctx.SystemContext,
namespace: ns,
client: NewClient(ctx.SystemContext),
events: make(chan *Event, maxEventChanBuffer),
tokens: tks,
redisPool: redisPool,
wg: ctx.WG,
}
}
// Attach a job life cycle controller
func (ba *basicAgent) Attach(ctl lcm.Controller) {
ba.ctl = ctl
}
// Trigger implements the same method of interface @Agent
func (ba *basicAgent) Trigger(evt *Event) error {
if evt == nil {
return errors.New("nil event")
}
if err := evt.Validate(); err != nil {
return err
}
ba.events <- evt
return nil
}
// Serve starts the basic agent: it launches the retry loop and the serving loop
// as background goroutines; termination is driven by the system context.
func (ba *basicAgent) Serve() error {
if ba.ctl == nil {
return errors.New("nil life cycle controller of hook agent")
}
ba.wg.Add(1)
go ba.loopRetry()
logger.Info("Hook event retrying loop is started")
ba.wg.Add(1)
go ba.serve()
logger.Info("Basic hook agent is started")
return nil
}
func (ba *basicAgent) serve() {
defer func() {
logger.Info("Basic hook agent is stopped")
ba.wg.Done()
}()
for {
select {
case evt := <-ba.events:
// If the handler concurrency limit is exceeded, wait here
// to avoid opening too many request connections at the same time
<-ba.tokens
go func(evt *Event) {
defer func() {
ba.tokens <- true // return token
}()
if err := ba.client.SendEvent(evt); err != nil {
logger.Errorf("Send hook event '%s' to '%s' failed with error: %s; push to the queue for retrying later", evt.Message, evt.URL, err)
// Push event to the retry queue
if err := ba.pushForRetry(evt); err != nil {
// Failed to push to the retry queue, let's directly push it
// to the event channel of this node with reasonable backoff time.
logger.Errorf("Failed to push hook event to the retry queue: %s", err)
// Put it back to the event chan after waiting for a reasonable while;
// the wait matters because it avoids firing a burst of requests that
// are likely to fail again in a short window.
// As 'pushForRetry' has already checked the timestamp and silently
// discards expired events (returning a nil error), there is no need
// to check expiry again here.
<-time.After(time.Duration(rand.Int31n(55)+5) * time.Second)
ba.events <- evt
}
}
}(evt)
case <-ba.context.Done():
return
}
}
}
func (ba *basicAgent) pushForRetry(evt *Event) error {
if evt == nil {
// do nothing
return nil
}
// Anyway we'll need the raw JSON, let's try to serialize it here
rawJSON, err := evt.Serialize()
if err != nil {
return err
}
now := time.Now().Unix()
if evt.Timestamp > 0 && now-evt.Timestamp >= maxEventExpireTime {
// Expired, do not need to push back to the retry queue
logger.Warningf("Event is expired: %s", rawJSON)
return nil
}
conn := ba.redisPool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyHookEventRetryQueue(ba.namespace)
args := make([]interface{}, 0)
// Use nano time to get more accurate timestamp
score := time.Now().UnixNano()
args = append(args, key, "NX", score, rawJSON)
_, err = conn.Do("ZADD", args...)
if err != nil {
return err
}
return nil
}
func (ba *basicAgent) loopRetry() {
defer func() {
logger.Info("Hook event retrying loop exit")
ba.wg.Done()
}()
token := make(chan bool, 1)
token <- true
for {
<-token
if err := ba.reSend(); err != nil {
waitInterval := shortLoopInterval
if err == rds.ErrNoElements {
// No elements
waitInterval = longLoopInterval
} else {
logger.Errorf("Resend hook event error: %s", err.Error())
}
select {
case <-time.After(waitInterval):
// Just wait, do nothing
case <-ba.context.Done():
// Terminated
return
}
}
// Put token back
token <- true
}
}
func (ba *basicAgent) reSend() error {
evt, err := ba.popMinOne()
if err != nil {
return err
}
jobID, status, err := extractJobID(evt.Data)
if err != nil {
return err
}
t, err := ba.ctl.Track(jobID)
if err != nil {
return err
}
diff := status.Compare(job.Status(t.Job().Info.Status))
if diff > 0 ||
(diff == 0 && t.Job().Info.CheckIn != evt.Data.CheckIn) {
ba.events <- evt
return nil
}
return errors.Errorf("outdated hook event: %s, latest job status: %s", evt.Message, t.Job().Info.Status)
}
func (ba *basicAgent) popMinOne() (*Event, error) {
conn := ba.redisPool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyHookEventRetryQueue(ba.namespace)
minOne, err := rds.ZPopMin(conn, key)
if err != nil {
return nil, err
}
rawEvent, ok := minOne.([]byte)
if !ok {
return nil, errors.New("bad request: non bytes slice for raw event")
}
evt := &Event{}
if err := evt.Deserialize(rawEvent); err != nil {
return nil, err
}
return evt, nil
}
// Extract the job ID and status from the event data field
// First return is job ID
// Second return is job status
// Last one is error
func extractJobID(data *job.StatusChange) (string, job.Status, error) {
if data != nil && len(data.JobID) > 0 {
status := job.Status(data.Status)
if status.Validate() == nil {
return data.JobID, status, nil
}
}
return "", "", errors.New("malform job status change data")
}
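A bootstrap sketch (illustrative only; the helper name and hook URL are assumptions) showing how the agent is meant to be attached, started, and fed with events:
import (
	"time"

	"github.com/goharbor/harbor/src/jobservice/env"
	"github.com/goharbor/harbor/src/jobservice/hook"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/lcm"
	"github.com/gomodule/redigo/redis"
)

// startHookAgent is a hypothetical bootstrap helper; envCtx, namespace,
// redisPool and lcmCtl are assumed to be prepared by the caller.
func startHookAgent(envCtx *env.Context, namespace string, redisPool *redis.Pool, lcmCtl lcm.Controller) (hook.Agent, error) {
	agent := hook.NewAgent(envCtx, namespace, redisPool)
	agent.Attach(lcmCtl) // Serve returns an error if no life cycle controller is attached
	if err := agent.Serve(); err != nil {
		return nil, err
	}

	// Report a status change; deliveries that fail are parked in the redis
	// retry queue and replayed by the retry loop shown above.
	evt := &hook.Event{
		URL:       "http://core:8080/service/notifications/jobs/webhook", // hypothetical hook URL
		Message:   "status of job fake_job_ID changed to Running",
		Data:      &job.StatusChange{JobID: "fake_job_ID", Status: job.RunningStatus.String()},
		Timestamp: time.Now().Unix(),
	}
	if err := agent.Trigger(evt); err != nil {
		return nil, err
	}
	return agent, nil
}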

View File

@ -0,0 +1,202 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"sync/atomic"
"testing"
"time"
"github.com/goharbor/harbor/src/jobservice/common/rds"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/lcm"
"github.com/goharbor/harbor/src/jobservice/tests"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"sync"
)
// HookAgentTestSuite tests functions of hook agent
type HookAgentTestSuite struct {
suite.Suite
pool *redis.Pool
namespace string
lcmCtl lcm.Controller
envContext *env.Context
cancel context.CancelFunc
}
// TestHookAgentTestSuite is entry of go test
func TestHookAgentTestSuite(t *testing.T) {
suite.Run(t, new(HookAgentTestSuite))
}
// SetupSuite prepares test suites
func (suite *HookAgentTestSuite) SetupSuite() {
suite.pool = tests.GiveMeRedisPool()
suite.namespace = tests.GiveMeTestNamespace()
ctx, cancel := context.WithCancel(context.Background())
suite.envContext = &env.Context{
SystemContext: ctx,
WG: new(sync.WaitGroup),
}
suite.cancel = cancel
suite.lcmCtl = lcm.NewController(suite.envContext, suite.namespace, suite.pool, func(hookURL string, change *job.StatusChange) error { return nil })
}
// TearDownSuite prepares test suites
func (suite *HookAgentTestSuite) TearDownSuite() {
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()
_ = tests.ClearAll(suite.namespace, conn)
}
// TestEventSending ...
func (suite *HookAgentTestSuite) TestEventSending() {
done := make(chan bool, 1)
expected := uint32(1300) // >1024 max
count := uint32(0)
counter := &count
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
c := atomic.AddUint32(counter, 1)
if c == expected {
done <- true
}
}()
_, _ = fmt.Fprintln(w, "ok")
}))
defer ts.Close()
// In case the test fails, avoid a deadlock
go func() {
<-time.After(time.Duration(10) * time.Second)
done <- true // time out
}()
agent := NewAgent(suite.envContext, suite.namespace, suite.pool)
agent.Attach(suite.lcmCtl)
err := agent.Serve()
require.NoError(suite.T(), err, "agent serve: nil error expected but got %s", err)
go func() {
defer func() {
suite.cancel()
}()
for i := uint32(0); i < expected; i++ {
changeData := &job.StatusChange{
JobID: fmt.Sprintf("job-%d", i),
Status: "running",
}
evt := &Event{
URL: ts.URL,
Message: fmt.Sprintf("status of job %s change to %s", changeData.JobID, changeData.Status),
Data: changeData,
Timestamp: time.Now().Unix(),
}
err := agent.Trigger(evt)
require.Nil(suite.T(), err, "agent trigger: nil error expected but got %s", err)
}
// Check results
<-done
require.Equal(suite.T(), expected, count, "expected %d hook events but only got %d", expected, count)
}()
// Wait
suite.envContext.WG.Wait()
}
// TestRetryAndPopMin ...
func (suite *HookAgentTestSuite) TestRetryAndPopMin() {
ctx := context.Background()
tks := make(chan bool, maxHandlers)
// Put tokens
for i := 0; i < maxHandlers; i++ {
tks <- true
}
agent := &basicAgent{
context: ctx,
namespace: suite.namespace,
client: NewClient(ctx),
events: make(chan *Event, maxEventChanBuffer),
tokens: tks,
redisPool: suite.pool,
}
agent.Attach(suite.lcmCtl)
changeData := &job.StatusChange{
JobID: "fake_job_ID",
Status: job.RunningStatus.String(),
}
evt := &Event{
URL: "https://fake.js.com",
Message: fmt.Sprintf("status of job %s change to %s", changeData.JobID, changeData.Status),
Data: changeData,
Timestamp: time.Now().Unix(),
}
// Mock job stats
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyJobStats(suite.namespace, "fake_job_ID")
_, err := conn.Do("HSET", key, "status", job.SuccessStatus.String())
require.Nil(suite.T(), err, "prepare job stats: nil error returned but got %s", err)
err = agent.pushForRetry(evt)
require.Nil(suite.T(), err, "push for retry: nil error expected but got %s", err)
err = agent.reSend()
require.Error(suite.T(), err, "resend: non nil error expected but got nil")
assert.Equal(suite.T(), 0, len(agent.events), "the hook event should be discarded but actually was not")
// Change status
_, err = conn.Do("HSET", key, "status", job.PendingStatus.String())
require.Nil(suite.T(), err, "prepare job stats: nil error returned but got %s", err)
err = agent.pushForRetry(evt)
require.Nil(suite.T(), err, "push for retry: nil error expected but got %s", err)
err = agent.reSend()
require.Nil(suite.T(), err, "resend: nil error should be returned but got %s", err)
<-time.After(time.Duration(1) * time.Second)
assert.Equal(suite.T(), 1, len(agent.events), "the hook event should be requeued but actually was not: %d", len(agent.events))
}

View File

@ -0,0 +1,136 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
"context"
"github.com/goharbor/harbor/src/jobservice/common/utils"
)
const (
proxyEnvHTTP = "http_proxy"
proxyEnvHTTPS = "https_proxy"
)
// Client for handling the hook events
type Client interface {
// SendEvent sends the event to the subscribed parties
SendEvent(evt *Event) error
}
// basicClient posts the related data to the interested parties.
type basicClient struct {
client *http.Client
ctx context.Context
}
// NewClient returns a pointer to a new hook client
func NewClient(ctx context.Context) Client {
// Create transport
transport := &http.Transport{
MaxIdleConns: 20,
IdleConnTimeout: 30 * time.Second,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
// Get the http/https proxies
proxyAddr, ok := os.LookupEnv(proxyEnvHTTP)
if !ok {
proxyAddr, ok = os.LookupEnv(proxyEnvHTTPS)
}
if ok && !utils.IsEmptyStr(proxyAddr) {
proxyURL, err := url.Parse(proxyAddr)
if err == nil {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
client := &http.Client{
Timeout: 15 * time.Second,
Transport: transport,
}
return &basicClient{
client: client,
ctx: ctx,
}
}
// SendEvent sends the status change event to the subscribed party.
// The event data may carry a 'check_in' message reported by the job.
func (bc *basicClient) SendEvent(evt *Event) error {
if evt == nil {
return errors.New("nil event")
}
if err := evt.Validate(); err != nil {
return err
}
// Marshal data
data, err := json.Marshal(evt.Data)
if err != nil {
return err
}
// New post request
req, err := http.NewRequest(http.MethodPost, evt.URL, strings.NewReader(string(data)))
if err != nil {
return err
}
res, err := bc.client.Do(req)
if err != nil {
return err
}
defer func() {
_ = res.Body.Close()
}() // close the response body so the connection can be reused
// Should be 200
if res.StatusCode != http.StatusOK {
if res.ContentLength > 0 {
// read error content and return
dt, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
return errors.New(string(dt))
}
return fmt.Errorf("failed to report status change via hook, expect '200' but got '%d'", res.StatusCode)
}
return nil
}
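For orientation, a minimal sketch of how a caller might drive the client above; the hook URL is a made-up placeholder, the hook package import path is assumed from the package layout, and the Event/StatusChange fields follow the test code in this change.

// Sketch: posting a status-change hook event with the client above (assumed values).
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/goharbor/harbor/src/jobservice/hook"
    "github.com/goharbor/harbor/src/jobservice/job"
)

func main() {
    client := hook.NewClient(context.Background())

    change := &job.StatusChange{
        JobID:  "example_job_ID", // placeholder job ID
        Status: job.RunningStatus.String(),
    }
    evt := &hook.Event{
        URL:       "https://hooks.example.com/job-status", // hypothetical subscriber endpoint
        Message:   fmt.Sprintf("status of job %s changed to %s", change.JobID, change.Status),
        Data:      change,
        Timestamp: time.Now().Unix(),
    }

    if err := client.SendEvent(evt); err != nil {
        fmt.Printf("send hook event: %v\n", err)
    }
}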

View File

@ -0,0 +1,105 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook
import (
"context"
"encoding/json"
"fmt"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// HookClientTestSuite tests functions of hook client
type HookClientTestSuite struct {
suite.Suite
mockServer *httptest.Server
client Client
}
// SetupSuite prepares test suite
func (suite *HookClientTestSuite) SetupSuite() {
suite.client = NewClient(context.Background())
suite.mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
bytes, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
change := &job.StatusChange{}
err = json.Unmarshal(bytes, change)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
if change.JobID == "job_ID_failed" {
w.WriteHeader(http.StatusInternalServerError)
return
}
fmt.Fprintln(w, "ok")
}))
}
// TearDownSuite clears test suite
func (suite *HookClientTestSuite) TearDownSuite() {
suite.mockServer.Close()
}
// TestHookClientTestSuite is entry of go test
func TestHookClientTestSuite(t *testing.T) {
suite.Run(t, new(HookClientTestSuite))
}
// TestHookClient ...
func (suite *HookClientTestSuite) TestHookClient() {
changeData := &job.StatusChange{
JobID: "fake_job_ID",
Status: "running",
}
evt := &Event{
URL: suite.mockServer.URL,
Data: changeData,
Message: fmt.Sprintf("Status of job %s changed to: %s", changeData.JobID, changeData.Status),
Timestamp: time.Now().Unix(),
}
err := suite.client.SendEvent(evt)
assert.Nil(suite.T(), err, "send event: nil error expected but got %s", err)
}
// TestReportStatusFailed ...
func (suite *HookClientTestSuite) TestReportStatusFailed() {
changeData := &job.StatusChange{
JobID: "job_ID_failed",
Status: "running",
}
evt := &Event{
URL: suite.mockServer.URL,
Data: changeData,
Message: fmt.Sprintf("Status of job %s changed to: %s", changeData.JobID, changeData.Status),
Timestamp: time.Now().Unix(),
}
err := suite.client.SendEvent(evt)
assert.NotNil(suite.T(), err, "send event: expected non nil error but got nil")
}

View File

@ -12,27 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package env
package job
import (
"context"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/jobservice/models"
)
// JobContext is combination of BaseContext and other job specified resources.
// JobContext will be the real execution context for one job.
type JobContext interface {
// Context is a combination of BaseContext and other job-specific resources.
// Context will be the real execution context for one job.
type Context interface {
// Build the context based on the parent context
//
// dep JobData : Dependencies for building the context, just in case that the build
// function need some external info
// A new job context will be generated based on the current context
// for the provided job.
//
// Returns:
// new JobContext based on the parent one
// new Context based on the parent one
// error if meet any problems
Build(dep JobData) (JobContext, error)
Build(tracker Tracker) (Context, error)
// Get property from the context
//
@ -57,27 +56,19 @@ type JobContext interface {
// error if meet any problems
Checkin(status string) error
// OPCommand return the control operational command like stop/cancel if have
// OPCommand returns the control operation command, like stop, if there is any
//
// Returns:
// the op command if there is one
// a flag indicating whether a command is present
OPCommand() (string, bool)
OPCommand() (OPCommand, bool)
// Return the logger
GetLogger() logger.Interface
// Launch sub jobs
LaunchJob(req models.JobRequest) (models.JobStats, error)
// Get tracker
Tracker() Tracker
}
// JobData defines job context dependencies.
type JobData struct {
ID string
Name string
Args map[string]interface{}
ExtraData map[string]interface{}
}
// JobContextInitializer is a func to initialize the concrete job context
type JobContextInitializer func(ctx *Context) (JobContext, error)
// ContextInitializer is a func to initialize the concrete job context
type ContextInitializer func(ctx context.Context) (Context, error)
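The ContextInitializer type above is what wires a concrete context implementation into the worker; below is a minimal sketch of hooking it up to the default implementation from this change (variable names are illustrative).

// Sketch: plugging the default context implementation into a ContextInitializer.
package main

import (
    "context"

    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/job/impl"
)

func main() {
    var initializer job.ContextInitializer = func(sysCtx context.Context) (job.Context, error) {
        // The per-job context is derived later via Build(tracker); only the
        // system context is needed at this point.
        return impl.NewDefaultContext(sysCtx), nil
    }

    // The worker would call this when bootstrapping; shown here only for shape.
    _, _ = initializer(context.Background())
}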

View File

@ -16,22 +16,17 @@ package impl
import (
"context"
"errors"
"fmt"
"math"
"reflect"
"time"
"github.com/goharbor/harbor/src/common"
"errors"
comcfg "github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/jobservice/logger/sweeper"
jmodel "github.com/goharbor/harbor/src/jobservice/models"
)
const (
@ -42,24 +37,14 @@ const (
type Context struct {
// System context
sysContext context.Context
// Logger for job
logger logger.Interface
// op command func
opCommandFunc job.CheckOPCmdFunc
// checkin func
checkInFunc job.CheckInFunc
// launch job
launchJobFunc job.LaunchJobFunc
// other required information
properties map[string]interface{}
// admin server client
cfgMgr comcfg.CfgManager
// job life cycle tracker
tracker job.Tracker
}
// NewContext ...
@ -107,11 +92,16 @@ func (c *Context) Init() error {
// Build implements the same method in env.JobContext interface
// This func will build the job execution context before running
func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
func (c *Context) Build(tracker job.Tracker) (job.Context, error) {
if tracker == nil || tracker.Job() == nil {
return nil, errors.New("nil job tracker")
}
jContext := &Context{
sysContext: c.sysContext,
cfgMgr: c.cfgMgr,
properties: make(map[string]interface{}),
tracker: tracker,
}
// Copy properties
@ -123,55 +113,21 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
// Refresh config properties
err := c.cfgMgr.Load()
props := c.cfgMgr.GetAll()
if err != nil {
return nil, err
}
props := c.cfgMgr.GetAll()
for k, v := range props {
jContext.properties[k] = v
}
// Set loggers for job
if err := setLoggers(func(lg logger.Interface) {
jContext.logger = lg
}, dep.ID); err != nil {
lg, err := createLoggers(tracker.Job().Info.JobID)
if err != nil {
return nil, err
}
if opCommandFunc, ok := dep.ExtraData["opCommandFunc"]; ok {
if reflect.TypeOf(opCommandFunc).Kind() == reflect.Func {
if funcRef, ok := opCommandFunc.(job.CheckOPCmdFunc); ok {
jContext.opCommandFunc = funcRef
}
}
}
if jContext.opCommandFunc == nil {
return nil, errors.New("failed to inject opCommandFunc")
}
if checkInFunc, ok := dep.ExtraData["checkInFunc"]; ok {
if reflect.TypeOf(checkInFunc).Kind() == reflect.Func {
if funcRef, ok := checkInFunc.(job.CheckInFunc); ok {
jContext.checkInFunc = funcRef
}
}
}
if jContext.checkInFunc == nil {
return nil, errors.New("failed to inject checkInFunc")
}
if launchJobFunc, ok := dep.ExtraData["launchJobFunc"]; ok {
if reflect.TypeOf(launchJobFunc).Kind() == reflect.Func {
if funcRef, ok := launchJobFunc.(job.LaunchJobFunc); ok {
jContext.launchJobFunc = funcRef
}
}
}
if jContext.launchJobFunc == nil {
return nil, errors.New("failed to inject launchJobFunc")
}
jContext.logger = lg
return jContext, nil
}
@ -189,22 +145,21 @@ func (c *Context) SystemContext() context.Context {
// Checkin is bridge func for reporting detailed status
func (c *Context) Checkin(status string) error {
if c.checkInFunc != nil {
c.checkInFunc(status)
} else {
return errors.New("nil check in function")
}
return nil
return c.tracker.CheckIn(status)
}
// OPCommand return the control operational command like stop/cancel if have
func (c *Context) OPCommand() (string, bool) {
if c.opCommandFunc != nil {
return c.opCommandFunc()
func (c *Context) OPCommand() (job.OPCommand, bool) {
latest, err := c.tracker.Status()
if err != nil {
return job.NilCommand, false
}
return "", false
if job.StoppedStatus == latest {
return job.StopCommand, true
}
return job.NilCommand, false
}
// GetLogger returns the logger
@ -212,54 +167,31 @@ func (c *Context) GetLogger() logger.Interface {
return c.logger
}
// LaunchJob launches sub jobs
func (c *Context) LaunchJob(req jmodel.JobRequest) (jmodel.JobStats, error) {
if c.launchJobFunc == nil {
return jmodel.JobStats{}, errors.New("nil launch job function")
}
return c.launchJobFunc(req)
// Tracker returns the job tracker attached to the context
func (c *Context) Tracker() job.Tracker {
return c.tracker
}
func getDBFromConfig(cfg map[string]interface{}) *models.Database {
database := &models.Database{}
database.Type = cfg[common.DatabaseType].(string)
postgresql := &models.PostGreSQL{}
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
postgresql.Port = int(cfg[common.PostGreSQLPort].(float64))
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
postgresql.SSLMode = cfg[common.PostGreSQLSSLMode].(string)
database.PostGreSQL = postgresql
return database
}
// create loggers based on the configurations and set it to the job executing context.
func setLoggers(setter func(lg logger.Interface), jobID string) error {
if setter == nil {
return errors.New("missing setter func")
}
// create loggers based on the configurations.
func createLoggers(jobID string) (logger.Interface, error) {
// Init job loggers here
lOptions := []logger.Option{}
lOptions := make([]logger.Option, 0)
for _, lc := range config.DefaultConfig.JobLoggerConfigs {
// For running job, the depth should be 5
if lc.Name == logger.LoggerNameFile || lc.Name == logger.LoggerNameStdOutput || lc.Name == logger.LoggerNameDB {
if lc.Name == logger.NameFile || lc.Name == logger.NameStdOutput || lc.Name == logger.NameDB {
if lc.Settings == nil {
lc.Settings = map[string]interface{}{}
}
lc.Settings["depth"] = 5
}
if lc.Name == logger.LoggerNameFile || lc.Name == logger.LoggerNameDB {
if lc.Name == logger.NameFile || lc.Name == logger.NameDB {
// Need extra param
fSettings := map[string]interface{}{}
for k, v := range lc.Settings {
// Copy settings
fSettings[k] = v
}
if lc.Name == logger.LoggerNameFile {
if lc.Name == logger.NameFile {
// Append file name param
fSettings["filename"] = fmt.Sprintf("%s.log", jobID)
lOptions = append(lOptions, logger.BackendOption(lc.Name, lc.Level, fSettings))
@ -273,14 +205,7 @@ func setLoggers(setter func(lg logger.Interface), jobID string) error {
}
}
// Get logger for the job
lg, err := logger.GetLogger(lOptions...)
if err != nil {
return fmt.Errorf("initialize job logger error: %s", err)
}
setter(lg)
return nil
return logger.GetLogger(lOptions...)
}
func initDBCompleted() error {

View File

@ -0,0 +1,126 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package impl
import (
"context"
"os"
"testing"
comcfg "github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/tests"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
// ContextImplTestSuite tests functions of context impl
type ContextImplTestSuite struct {
suite.Suite
tracker job.Tracker
namespace string
pool *redis.Pool
jobID string
}
// TestContextImplTestSuite is entry of go test
func TestContextImplTestSuite(t *testing.T) {
suite.Run(t, new(ContextImplTestSuite))
}
// SetupSuite prepares test suite
func (suite *ContextImplTestSuite) SetupSuite() {
config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
{
Name: "STD_OUTPUT",
Level: "DEBUG",
},
{
Name: "FILE",
Level: "INFO",
Settings: map[string]interface{}{
"base_dir": os.TempDir(),
},
Sweeper: &config.LogSweeperConfig{
Duration: 1,
Settings: map[string]interface{}{
"work_dir": os.TempDir(),
},
},
},
}
suite.namespace = tests.GiveMeTestNamespace()
suite.pool = tests.GiveMeRedisPool()
suite.jobID = utils.MakeIdentifier()
mockStats := &job.Stats{
Info: &job.StatsInfo{
JobID: suite.jobID,
JobKind: job.KindGeneric,
JobName: job.SampleJob,
Status: job.PendingStatus.String(),
IsUnique: false,
},
}
suite.tracker = job.NewBasicTrackerWithStats(
context.Background(),
mockStats,
suite.namespace,
suite.pool,
nil,
)
err := suite.tracker.Save()
require.NoError(suite.T(), err, "job stats: nil error expected but got %s", err)
}
// TearDownSuite clears the test suite
func (suite *ContextImplTestSuite) TearDownSuite() {
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()
_ = tests.ClearAll(suite.namespace, conn)
}
// TestContextImpl tests the context impl
func (suite *ContextImplTestSuite) TestContextImpl() {
cfgMem := comcfg.NewInMemoryManager()
cfgMem.Set("read_only", "true")
ctx := NewContext(context.Background(), cfgMem)
jCtx, err := ctx.Build(suite.tracker)
require.NoErrorf(suite.T(), err, "build job context: nil error expected but got %s", err)
v, ok := jCtx.Get("read_only")
assert.Equal(suite.T(), true, ok)
assert.Equal(suite.T(), true, v.(bool))
err = jCtx.Checkin("check in testing")
assert.NoErrorf(suite.T(), err, "check in: nil error expected but got %s", err)
l := jCtx.GetLogger()
assert.NotNil(suite.T(), l, "logger should be not nil")
_, ok = jCtx.OPCommand()
assert.Equal(suite.T(), false, ok)
}

View File

@ -17,144 +17,97 @@ package impl
import (
"context"
"errors"
"reflect"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
jmodel "github.com/goharbor/harbor/src/jobservice/models"
)
// DefaultContext provides a basic job context
type DefaultContext struct {
// System context
sysContext context.Context
// Logger for job
logger logger.Interface
// op command func
opCommandFunc job.CheckOPCmdFunc
// checkin func
checkInFunc job.CheckInFunc
// launch job
launchJobFunc job.LaunchJobFunc
// other required information
// Other required information
properties map[string]interface{}
// Track the job attached with the context
tracker job.Tracker
}
// NewDefaultContext is constructor of building DefaultContext
func NewDefaultContext(sysCtx context.Context) env.JobContext {
func NewDefaultContext(sysCtx context.Context) job.Context {
return &DefaultContext{
sysContext: sysCtx,
properties: make(map[string]interface{}),
}
}
// Build implements the same method in env.JobContext interface
// Build implements the same method in the job.Context interface
// This func will build the job execution context before running
func (c *DefaultContext) Build(dep env.JobData) (env.JobContext, error) {
func (dc *DefaultContext) Build(t job.Tracker) (job.Context, error) {
if t == nil {
return nil, errors.New("nil job tracker")
}
jContext := &DefaultContext{
sysContext: c.sysContext,
sysContext: dc.sysContext,
tracker: t,
properties: make(map[string]interface{}),
}
// Copy properties
if len(c.properties) > 0 {
for k, v := range c.properties {
if len(dc.properties) > 0 {
for k, v := range dc.properties {
jContext.properties[k] = v
}
}
// Set loggers for job
if err := setLoggers(func(lg logger.Interface) {
jContext.logger = lg
}, dep.ID); err != nil {
lg, err := createLoggers(t.Job().Info.JobID)
if err != nil {
return nil, err
}
if opCommandFunc, ok := dep.ExtraData["opCommandFunc"]; ok {
if reflect.TypeOf(opCommandFunc).Kind() == reflect.Func {
if funcRef, ok := opCommandFunc.(job.CheckOPCmdFunc); ok {
jContext.opCommandFunc = funcRef
}
}
}
if jContext.opCommandFunc == nil {
return nil, errors.New("failed to inject opCommandFunc")
}
if checkInFunc, ok := dep.ExtraData["checkInFunc"]; ok {
if reflect.TypeOf(checkInFunc).Kind() == reflect.Func {
if funcRef, ok := checkInFunc.(job.CheckInFunc); ok {
jContext.checkInFunc = funcRef
}
}
}
if jContext.checkInFunc == nil {
return nil, errors.New("failed to inject checkInFunc")
}
if launchJobFunc, ok := dep.ExtraData["launchJobFunc"]; ok {
if reflect.TypeOf(launchJobFunc).Kind() == reflect.Func {
if funcRef, ok := launchJobFunc.(job.LaunchJobFunc); ok {
jContext.launchJobFunc = funcRef
}
}
}
if jContext.launchJobFunc == nil {
return nil, errors.New("failed to inject launchJobFunc")
}
jContext.logger = lg
return jContext, nil
}
// Get implements the same method in env.JobContext interface
func (c *DefaultContext) Get(prop string) (interface{}, bool) {
v, ok := c.properties[prop]
// Get implements the same method in the job.Context interface
func (dc *DefaultContext) Get(prop string) (interface{}, bool) {
v, ok := dc.properties[prop]
return v, ok
}
// SystemContext implements the same method in env.JobContext interface
func (c *DefaultContext) SystemContext() context.Context {
return c.sysContext
// SystemContext implements the same method in the job.Context interface
func (dc *DefaultContext) SystemContext() context.Context {
return dc.sysContext
}
// Checkin is a bridge func for reporting detailed status
func (c *DefaultContext) Checkin(status string) error {
if c.checkInFunc != nil {
c.checkInFunc(status)
} else {
return errors.New("nil check in function")
}
return nil
func (dc *DefaultContext) Checkin(status string) error {
return dc.tracker.CheckIn(status)
}
// OPCommand return the control operational command like stop/cancel if have
func (c *DefaultContext) OPCommand() (string, bool) {
if c.opCommandFunc != nil {
return c.opCommandFunc()
// OPCommand returns the control operation command, like stop, if there is any
func (dc *DefaultContext) OPCommand() (job.OPCommand, bool) {
latest, err := dc.tracker.Status()
if err != nil {
return job.NilCommand, false
}
return "", false
if job.StoppedStatus == latest {
return job.StopCommand, true
}
return job.NilCommand, false
}
// GetLogger returns the logger
func (c *DefaultContext) GetLogger() logger.Interface {
return c.logger
func (dc *DefaultContext) GetLogger() logger.Interface {
return dc.logger
}
// LaunchJob launches sub jobs
func (c *DefaultContext) LaunchJob(req jmodel.JobRequest) (jmodel.JobStats, error) {
if c.launchJobFunc == nil {
return jmodel.JobStats{}, errors.New("nil launch job function")
}
return c.launchJobFunc(req)
// Tracker returns the tracker of the job attached to the context
func (dc *DefaultContext) Tracker() job.Tracker {
return dc.tracker
}

View File

@ -1,104 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package impl
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/models"
)
func TestDefaultContext(t *testing.T) {
defaultContext := NewDefaultContext(context.Background())
jobData := env.JobData{
ID: "fake_id",
Name: "DEMO",
Args: make(map[string]interface{}),
ExtraData: make(map[string]interface{}),
}
var opCmdFund job.CheckOPCmdFunc = func() (string, bool) {
return "stop", true
}
var checkInFunc job.CheckInFunc = func(msg string) {
fmt.Println(msg)
}
var launchJobFunc job.LaunchJobFunc = func(req models.JobRequest) (models.JobStats, error) {
return models.JobStats{
Stats: &models.JobStatData{
JobID: "fake_sub_job_id",
Status: "pending",
JobName: "DEMO",
JobKind: job.JobKindGeneric,
EnqueueTime: time.Now().Unix(),
UpdateTime: time.Now().Unix(),
},
}, nil
}
jobData.ExtraData["opCommandFunc"] = opCmdFund
jobData.ExtraData["checkInFunc"] = checkInFunc
jobData.ExtraData["launchJobFunc"] = launchJobFunc
oldLogConfig := config.DefaultConfig.JobLoggerConfigs
defer func() {
config.DefaultConfig.JobLoggerConfigs = oldLogConfig
}()
logSettings := map[string]interface{}{}
logSettings["base_dir"] = os.TempDir()
config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
{
Level: "DEBUG",
Name: "FILE",
Settings: logSettings,
},
}
newJobContext, err := defaultContext.Build(jobData)
if err != nil {
t.Fatal(err)
}
cmd, ok := newJobContext.OPCommand()
if !ok || cmd != "stop" {
t.Fatalf("expect op command 'stop' but got %s", cmd)
}
if err := newJobContext.Checkin("hello"); err != nil {
t.Fatal(err)
}
stats, err := newJobContext.LaunchJob(models.JobRequest{})
if err != nil {
t.Fatal(err)
}
if stats.Stats.JobID != "fake_sub_job_id" {
t.Fatalf("expect job id 'fake_sub_job_id' but got %s", stats.Stats.JobID)
}
ctx := newJobContext.SystemContext()
if ctx == nil {
t.Fatal("got nil system context")
}
}

View File

@ -1,127 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package impl
import (
"errors"
"fmt"
"strings"
"time"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/opm"
)
// DemoJob is the job to demostrate the job interface.
type DemoJob struct{}
// MaxFails is implementation of same method in Interface.
func (dj *DemoJob) MaxFails() uint {
return 3
}
// ShouldRetry ...
func (dj *DemoJob) ShouldRetry() bool {
return true
}
// Validate is implementation of same method in Interface.
func (dj *DemoJob) Validate(params map[string]interface{}) error {
if params == nil || len(params) == 0 {
return errors.New("parameters required for replication job")
}
name, ok := params["image"]
if !ok {
return errors.New("missing parameter 'image'")
}
if !strings.HasPrefix(name.(string), "demo") {
return fmt.Errorf("expected '%s' but got '%s'", "demo steven", name)
}
return nil
}
// Run the replication logic here.
func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error {
logger := ctx.GetLogger()
defer func() {
logger.Info("I'm finished, exit!")
}()
fmt.Println("I'm running")
logger.Infof("params: %#v\n", params)
logger.Infof("context: %#v\n", ctx)
if v, ok := ctx.Get("email_from"); ok {
fmt.Printf("Get prop form context: email_from=%s\n", v)
}
/*if u, err := dao.GetUser(models.User{}); err == nil {
fmt.Printf("u=%#+v\n", u)
}*/
logger.Info("check in 30%")
ctx.Checkin("30%")
time.Sleep(2 * time.Second)
logger.Warning("check in 60%")
ctx.Checkin("60%")
time.Sleep(2 * time.Second)
logger.Debug("check in 100%")
ctx.Checkin("100%")
time.Sleep(1 * time.Second)
// HOLD ON FOR A WHILE
logger.Error("Holding for 5 sec")
<-time.After(5 * time.Second)
if cmd, ok := ctx.OPCommand(); ok {
logger.Infof("cmd=%s\n", cmd)
fmt.Printf("Receive OP command: %s\n", cmd)
if cmd == opm.CtlCommandCancel {
logger.Info("exit for receiving cancel signal")
return errs.JobCancelledError()
}
logger.Info("exit for receiving stop signal")
return errs.JobStoppedError()
}
/*fmt.Println("Launch sub job")
jobParams := make(map[string]interface{})
jobParams["image"] = "demo:1.7"
subDemoJob := models.JobRequest{
Job: &models.JobData{
Name: "DEMO",
Parameters: jobParams,
Metadata: &models.JobMetadata{
JobKind: job.JobKindGeneric,
},
},
}
subJob, err := ctx.LaunchJob(subDemoJob)
if err != nil {
fmt.Printf("Create sub job failed with error: %s\n", err)
logger.Error(err)
return
}
fmt.Printf("Sub job: %v", subJob)*/
fmt.Println("I'm close to end")
return nil
}

View File

@ -23,7 +23,7 @@ import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/common/registryctl"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/registryctl/client"
)
@ -56,12 +56,12 @@ func (gc *GarbageCollector) ShouldRetry() bool {
}
// Validate implements the interface in job/Interface
func (gc *GarbageCollector) Validate(params map[string]interface{}) error {
func (gc *GarbageCollector) Validate(params job.Parameters) error {
return nil
}
// Run implements the interface in job/Interface
func (gc *GarbageCollector) Run(ctx env.JobContext, params map[string]interface{}) error {
func (gc *GarbageCollector) Run(ctx job.Context, params job.Parameters) error {
if err := gc.init(ctx, params); err != nil {
return err
}
@ -93,12 +93,12 @@ func (gc *GarbageCollector) Run(ctx env.JobContext, params map[string]interface{
return nil
}
func (gc *GarbageCollector) init(ctx env.JobContext, params map[string]interface{}) error {
func (gc *GarbageCollector) init(ctx job.Context, params job.Parameters) error {
registryctl.Init()
gc.registryCtlClient = registryctl.RegistryCtlClient
gc.logger = ctx.GetLogger()
errTpl := "Failed to get required property: %s"
errTpl := "failed to get required property: %s"
if v, ok := ctx.Get(common.CoreURL); ok && len(v.(string)) > 0 {
gc.CoreURL = v.(string)
} else {
@ -165,7 +165,7 @@ func (gc *GarbageCollector) cleanCache() error {
func delKeys(con redis.Conn, pattern string) error {
iter := 0
keys := []string{}
keys := make([]string, 0)
for {
arr, err := redis.Values(con.Do("SCAN", iter, "MATCH", pattern))
if err != nil {

View File

@ -18,8 +18,7 @@ import (
"encoding/json"
"fmt"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/opm"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/replication/model"
"github.com/goharbor/harbor/src/replication/transfer"
@ -51,13 +50,13 @@ func (r *Replication) ShouldRetry() bool {
}
// Validate does nothing
func (r *Replication) Validate(params map[string]interface{}) error {
func (r *Replication) Validate(params job.Parameters) error {
return nil
}
// Run gets the corresponding transfer according to the resource type
// and calls its function to do the real work
func (r *Replication) Run(ctx env.JobContext, params map[string]interface{}) error {
func (r *Replication) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()
src, dst, err := parseParams(params)
@ -77,15 +76,15 @@ func (r *Replication) Run(ctx env.JobContext, params map[string]interface{}) err
if !exist {
return false
}
return cmd == opm.CtlCommandStop
return cmd == job.StopCommand
}
transfer, err := factory(ctx.GetLogger(), stopFunc)
trans, err := factory(ctx.GetLogger(), stopFunc)
if err != nil {
logger.Errorf("failed to create transfer: %v", err)
return err
}
return transfer.Transfer(src, dst)
return trans.Transfer(src, dst)
}
func parseParams(params map[string]interface{}) (*model.Resource, *model.Resource, error) {

View File

@ -22,9 +22,7 @@ import (
common_http "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/http/modifier/auth"
reg "github.com/goharbor/harbor/src/common/utils/registry"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/opm"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/replication/model"
)
@ -32,7 +30,7 @@ import (
// a scheduler when submitting it as a scheduled job. It receives
// a URL and data, and post the data to the URL when it is running
type Scheduler struct {
ctx env.JobContext
ctx job.Context
}
// ShouldRetry ...
@ -46,15 +44,15 @@ func (s *Scheduler) MaxFails() uint {
}
// Validate ....
func (s *Scheduler) Validate(params map[string]interface{}) error {
func (s *Scheduler) Validate(params job.Parameters) error {
return nil
}
// Run ...
func (s *Scheduler) Run(ctx env.JobContext, params map[string]interface{}) error {
func (s *Scheduler) Run(ctx job.Context, params job.Parameters) error {
cmd, exist := ctx.OPCommand()
if exist && cmd == opm.CtlCommandStop {
return errs.JobStoppedError()
if exist && cmd == job.StopCommand {
return nil
}
logger := ctx.GetLogger()

View File

@ -0,0 +1,87 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sample
import (
"errors"
"fmt"
"strings"
"time"
"github.com/goharbor/harbor/src/jobservice/job"
)
// Job is a sample to show how to implement a job.
type Job struct{}
// MaxFails is implementation of same method in Interface.
func (j *Job) MaxFails() uint {
return 3
}
// ShouldRetry ...
func (j *Job) ShouldRetry() bool {
return true
}
// Validate is implementation of same method in Interface.
func (j *Job) Validate(params job.Parameters) error {
if params == nil || len(params) == 0 {
return errors.New("parameters required for replication job")
}
name, ok := params["image"]
if !ok {
return errors.New("missing parameter 'image'")
}
if !strings.HasPrefix(name.(string), "demo") {
return fmt.Errorf("expected '%s' but got '%s'", "demo *", name)
}
return nil
}
// Run runs the sample job logic.
func (j *Job) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()
logger.Info("Sample job starting")
defer func() {
logger.Info("Sample job exit")
}()
logger.Infof("Params: %#v\n", params)
if v, ok := ctx.Get("sample"); ok {
fmt.Printf("Get prop form context: sample=%s\n", v)
}
ctx.Checkin("progress data: %30")
<-time.After(1 * time.Second)
ctx.Checkin("progress data: %60")
// HOLD ON FOR A WHILE
logger.Warning("Holding for 10 seconds")
<-time.After(10 * time.Second)
if cmd, ok := ctx.OPCommand(); ok {
if cmd == job.StopCommand {
logger.Info("Exit for receiving stop signal")
return nil
}
}
// Successfully exit
return nil
}
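A small sketch of exercising the Validate rule above: the 'image' parameter must exist and start with 'demo'. The import path of the sample package is assumed here and the parameter values are made up.

// Sketch: validating parameters against the sample job above (assumed import path).
package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/job/impl/sample"
)

func main() {
    j := &sample.Job{}

    // Passes: "image" is present and starts with "demo".
    fmt.Println(j.Validate(job.Parameters{"image": "demo:1.7"}))

    // Fails: the "image" parameter is missing.
    fmt.Println(j.Validate(job.Parameters{}))
}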

View File

@ -25,7 +25,7 @@ import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl/utils"
)
@ -50,7 +50,7 @@ func (sa *All) ShouldRetry() bool {
}
// Validate implements the interface in job/Interface
func (sa *All) Validate(params map[string]interface{}) error {
func (sa *All) Validate(params job.Parameters) error {
if len(params) > 0 {
return fmt.Errorf("the parms should be empty for scan all job")
}
@ -58,7 +58,7 @@ func (sa *All) Validate(params map[string]interface{}) error {
}
// Run implements the interface in job/Interface
func (sa *All) Run(ctx env.JobContext, params map[string]interface{}) error {
func (sa *All) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()
logger.Info("Scanning all the images in the registry")
err := sa.init(ctx)
@ -107,7 +107,7 @@ func (sa *All) Run(ctx env.JobContext, params map[string]interface{}) error {
return nil
}
func (sa *All) init(ctx env.JobContext) error {
func (sa *All) init(ctx job.Context) error {
if v, err := getAttrFromCtx(ctx, common.RegistryURL); err == nil {
sa.registryURL = v
} else {
@ -133,9 +133,9 @@ func (sa *All) init(ctx env.JobContext) error {
return nil
}
func getAttrFromCtx(ctx env.JobContext, key string) (string, error) {
func getAttrFromCtx(ctx job.Context, key string) (string, error) {
if v, ok := ctx.Get(key); ok && len(v.(string)) > 0 {
return v.(string), nil
}
return "", fmt.Errorf("Failed to get required property: %s", key)
return "", fmt.Errorf("failed to get required property: %s", key)
}

View File

@ -24,11 +24,11 @@ import (
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/job"
cjob "github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/clair"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl/utils"
)
@ -51,12 +51,12 @@ func (cj *ClairJob) ShouldRetry() bool {
}
// Validate implements the interface in job/Interface
func (cj *ClairJob) Validate(params map[string]interface{}) error {
func (cj *ClairJob) Validate(params job.Parameters) error {
return nil
}
// Run implements the interface in job/Interface
func (cj *ClairJob) Run(ctx env.JobContext, params map[string]interface{}) error {
func (cj *ClairJob) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()
if err := cj.init(ctx); err != nil {
logger.Errorf("Failed to initialize the job, error: %v", err)
@ -114,8 +114,8 @@ func (cj *ClairJob) Run(ctx env.JobContext, params map[string]interface{}) error
return err
}
func (cj *ClairJob) init(ctx env.JobContext) error {
errTpl := "Failed to get required property: %s"
func (cj *ClairJob) init(ctx job.Context) error {
errTpl := "failed to get required property: %s"
if v, ok := ctx.Get(common.RegistryURL); ok && len(v.(string)) > 0 {
cj.registryURL = v.(string)
} else {
@ -140,8 +140,8 @@ func (cj *ClairJob) init(ctx env.JobContext) error {
return nil
}
func transformParam(params map[string]interface{}) (*job.ScanJobParms, error) {
res := job.ScanJobParms{}
func transformParam(params job.Parameters) (*cjob.ScanJobParms, error) {
res := cjob.ScanJobParms{}
parmsBytes, err := json.Marshal(params)
if err != nil {
return nil, err
@ -151,7 +151,7 @@ func transformParam(params map[string]interface{}) (*job.ScanJobParms, error) {
}
func prepareLayers(payload []byte, registryURL, repo, tk string) ([]models.ClairLayer, error) {
layers := []models.ClairLayer{}
layers := make([]models.ClairLayer, 0)
manifest, _, err := distribution.UnmarshalManifest(schema2.MediaTypeManifest, payload)
if err != nil {
return layers, err
@ -160,7 +160,7 @@ func prepareLayers(payload []byte, registryURL, repo, tk string) ([]models.Clair
// form the chain by using the digests of all parent layers in the image, such that if another image is built on top of this image the layer name can be re-used.
shaChain := ""
for _, d := range manifest.References() {
if d.MediaType == schema2.MediaTypeConfig {
if d.MediaType == schema2.MediaTypeImageConfig {
continue
}
shaChain += string(d.Digest) + "-"

View File

@ -14,22 +14,6 @@
package job
import (
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/models"
)
// CheckOPCmdFunc is the function to check if the related operation commands
// like STOP or CANCEL is fired for the specified job. If yes, return the
// command code for job to determine if take corresponding action.
type CheckOPCmdFunc func() (string, bool)
// CheckInFunc is designed for job to report more detailed progress info
type CheckInFunc func(message string)
// LaunchJobFunc is designed to launch sub jobs in the job
type LaunchJobFunc func(req models.JobRequest) (models.JobStats, error)
// Interface defines the related injection and run entry methods.
type Interface interface {
// Declare how many times the job can be retried if failed.
@ -38,7 +22,7 @@ type Interface interface {
// uint: the failure count allowed. If it is set to 0, then default value 4 is used.
MaxFails() uint
// Tell the worker pool if retry the failed job when the fails is
// Tell the worker if the failed job should be retried when the failure count is
// still less than the number declared by the method 'MaxFails'.
//
// Returns:
@ -49,16 +33,16 @@ type Interface interface {
//
// Return:
// error if parameters are not valid. NOTES: If no parameters needed, directly return nil.
Validate(params map[string]interface{}) error
Validate(params Parameters) error
// Run the business logic here.
// The related arguments will be injected by the workerpool.
//
// ctx env.JobContext : Job execution context.
// ctx Context : Job execution context.
// params map[string]interface{} : parameters with key-pair style for the job execution.
//
// Returns:
// error if failed to run. NOTES: If job is stopped or cancelled, a specified error should be returned
//
Run(ctx env.JobContext, params map[string]interface{}) error
Run(ctx Context, params Parameters) error
}
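A bare-bones sketch of a job type satisfying the reshaped Interface above; the type name and behavior are illustrative only, and the sample job later in this change is the fuller reference.

// Sketch: a minimal implementation of the job Interface (illustrative only).
package myjob

import (
    "github.com/goharbor/harbor/src/jobservice/job"
)

// NoopJob does no real work; it only shows the required methods.
type NoopJob struct{}

// MaxFails allows up to 3 failures before giving up.
func (n *NoopJob) MaxFails() uint { return 3 }

// ShouldRetry tells the worker to retry the job on failure.
func (n *NoopJob) ShouldRetry() bool { return true }

// Validate accepts any parameters.
func (n *NoopJob) Validate(params job.Parameters) error { return nil }

// Run logs a line and exits successfully.
func (n *NoopJob) Run(ctx job.Context, params job.Parameters) error {
    ctx.GetLogger().Info("noop job executed")
    return nil
}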

View File

@ -1,32 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package job
const (
// JobStatusPending : job status pending
JobStatusPending = "Pending"
// JobStatusRunning : job status running
JobStatusRunning = "Running"
// JobStatusStopped : job status stopped
JobStatusStopped = "Stopped"
// JobStatusCancelled : job status cancelled
JobStatusCancelled = "Cancelled"
// JobStatusError : job status error
JobStatusError = "Error"
// JobStatusSuccess : job status success
JobStatusSuccess = "Success"
// JobStatusScheduled : job status scheduled
JobStatusScheduled = "Scheduled"
)

View File

@ -15,10 +15,10 @@
package job
const (
// JobKindGeneric : Kind of generic job
JobKindGeneric = "Generic"
// JobKindScheduled : Kind of scheduled job
JobKindScheduled = "Scheduled"
// JobKindPeriodic : Kind of periodic job
JobKindPeriodic = "Periodic"
// KindGeneric : Kind of generic job
KindGeneric = "Generic"
// KindScheduled : Kind of scheduled job
KindScheduled = "Scheduled"
// KindPeriodic : Kind of periodic job
KindPeriodic = "Periodic"
)

View File

@ -0,0 +1,33 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package job
// Define the registration name constants of known jobs
const (
// SampleJob is name of demo job
SampleJob = "DEMO"
// ImageScanJob is the name of the scan job; it is used as the key to register to the job service.
ImageScanJob = "IMAGE_SCAN"
// ImageScanAllJob is the name of "scanall" job in job service
ImageScanAllJob = "IMAGE_SCAN_ALL"
// ImageGC the name of image garbage collection job in job service
ImageGC = "IMAGE_GC"
// Replication : the name of the replication job in job service
Replication = "REPLICATION"
// ReplicationScheduler : the name of the replication scheduler job in job service
ReplicationScheduler = "IMAGE_REPLICATE"
)

View File

@ -0,0 +1,133 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package job
import (
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/pkg/errors"
)
// Parameters for job execution.
type Parameters map[string]interface{}
// Request is the request of launching a job.
type Request struct {
Job *RequestBody `json:"job"`
}
// RequestBody keeps the basic info.
type RequestBody struct {
Name string `json:"name"`
Parameters Parameters `json:"parameters"`
Metadata *Metadata `json:"metadata"`
StatusHook string `json:"status_hook"`
}
// Metadata stores the metadata of job.
type Metadata struct {
JobKind string `json:"kind"`
ScheduleDelay uint64 `json:"schedule_delay,omitempty"`
Cron string `json:"cron_spec,omitempty"`
IsUnique bool `json:"unique"`
}
// Stats keeps the result of job launching.
type Stats struct {
Info *StatsInfo `json:"job"`
}
// StatsInfo keeps the stats of job
type StatsInfo struct {
JobID string `json:"id"`
Status string `json:"status"`
JobName string `json:"name"`
JobKind string `json:"kind"`
IsUnique bool `json:"unique"`
RefLink string `json:"ref_link,omitempty"`
CronSpec string `json:"cron_spec,omitempty"`
EnqueueTime int64 `json:"enqueue_time"`
UpdateTime int64 `json:"update_time"`
RunAt int64 `json:"run_at,omitempty"`
CheckIn string `json:"check_in,omitempty"`
CheckInAt int64 `json:"check_in_at,omitempty"`
DieAt int64 `json:"die_at,omitempty"`
WebHookURL string `json:"web_hook_url,omitempty"`
UpstreamJobID string `json:"upstream_job_id,omitempty"` // Ref the upstream job if existing
NumericPID int64 `json:"numeric_policy_id,omitempty"` // The numeric policy ID of the periodic job
Parameters Parameters `json:"parameters,omitempty"`
}
// ActionRequest defines the request for triggering a job action like stop.
type ActionRequest struct {
Action string `json:"action"`
}
// StatusChange is designed for reporting the status change via hook.
type StatusChange struct {
JobID string `json:"job_id"`
Status string `json:"status"`
CheckIn string `json:"check_in,omitempty"`
Metadata *StatsInfo `json:"metadata,omitempty"`
}
// SimpleStatusChange only keeps job ID and the target status
type SimpleStatusChange struct {
JobID string `json:"job_id"`
TargetStatus string `json:"target_status"`
}
// Validate the job stats
func (st *Stats) Validate() error {
if st.Info == nil {
return errors.New("nil stats body")
}
if utils.IsEmptyStr(st.Info.JobID) {
return errors.New("missing job ID in job stats")
}
if utils.IsEmptyStr(st.Info.JobName) {
return errors.New("missing job name in job stats")
}
if utils.IsEmptyStr(st.Info.JobKind) {
return errors.New("missing job name in job stats")
}
if st.Info.JobKind != KindGeneric &&
st.Info.JobKind != KindPeriodic &&
st.Info.JobKind != KindScheduled {
return errors.Errorf("job kind is not supported: %s", st.Info.JobKind)
}
status := Status(st.Info.Status)
if err := status.Validate(); err != nil {
return err
}
if st.Info.JobKind == KindPeriodic {
if utils.IsEmptyStr(st.Info.CronSpec) {
return errors.New("missing cron spec for periodic job")
}
}
if st.Info.JobKind == KindScheduled {
if st.Info.RunAt == 0 {
return errors.New("enqueue timestamp missing for scheduled job")
}
}
return nil
}
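For reference, a minimal Stats value that passes the Validate checks above; the ID is a placeholder and the other fields mirror the test fixtures in this change.

// Sketch: a minimal generic-job Stats object satisfying Validate (placeholder values).
package main

import (
    "fmt"
    "time"

    "github.com/goharbor/harbor/src/jobservice/job"
)

func main() {
    stats := &job.Stats{
        Info: &job.StatsInfo{
            JobID:       "example_job_ID", // placeholder ID
            JobName:     job.SampleJob,
            JobKind:     job.KindGeneric,
            Status:      job.PendingStatus.String(),
            EnqueueTime: time.Now().Unix(),
        },
    }

    // Prints <nil>: all fields required for a generic job are present.
    fmt.Println(stats.Validate())
}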

View File

@ -12,4 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package pool
package job
const (
// StopCommand is const for stop command
StopCommand OPCommand = "stop"
// NilCommand is const for a nil command
NilCommand OPCommand = "nil"
)
// OPCommand is the type of job operation commands
type OPCommand string
// IsStop returns whether the op command is stop
func (oc OPCommand) IsStop() bool {
return oc == StopCommand
}
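A tiny usage sketch of the OPCommand type above; in a real job the command would come from ctx.OPCommand(), here it is assigned directly for illustration.

// Sketch: reacting to the stop command via OPCommand.IsStop (illustrative only).
package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/jobservice/job"
)

func main() {
    cmd := job.StopCommand // in a real job: cmd, ok := ctx.OPCommand()
    if cmd.IsStop() {
        fmt.Println("stop signal received, exiting gracefully")
    }
}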

View File

@ -0,0 +1,82 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package job
import "fmt"
const (
// PendingStatus : job status pending
PendingStatus Status = "Pending"
// RunningStatus : job status running
RunningStatus Status = "Running"
// StoppedStatus : job status stopped
StoppedStatus Status = "Stopped"
// ErrorStatus : job status error
ErrorStatus Status = "Error"
// SuccessStatus : job status success
SuccessStatus Status = "Success"
// ScheduledStatus : job status scheduled
ScheduledStatus Status = "Scheduled"
)
// Status of job
type Status string
// Validate the status
// If it's valid, then return nil error
// otherwise a non-nil error is returned
func (s Status) Validate() error {
if s.Code() == -1 {
return fmt.Errorf("%s is not valid job status", s)
}
return nil
}
// Code of job status
func (s Status) Code() int {
switch s {
case "Pending":
return 0
case "Scheduled":
return 1
case "Running":
return 2
// All final statuses share the same code
// Each job will have only one final status
case "Stopped":
return 3
case "Error":
return 3
case "Success":
return 3
default:
}
return -1
}
// Compare the two job status
// if < 0, s before another status
// if == 0, same status
// if > 0, s after another status
func (s Status) Compare(another Status) int {
return s.Code() - another.Code()
}
// String returns the raw string value of the status
func (s Status) String() string {
return string(s)
}
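To make the ordering concrete, a short sketch of the Code and Compare semantics above; note that all terminal statuses share code 3 and therefore compare as equal.

// Sketch: how Status.Code and Status.Compare order the job states.
package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/jobservice/job"
)

func main() {
    fmt.Println(job.PendingStatus.Code()) // 0
    fmt.Println(job.RunningStatus.Code()) // 2

    // Negative: Running comes before Success in the life cycle.
    fmt.Println(job.RunningStatus.Compare(job.SuccessStatus))

    // Zero: Stopped, Error and Success are all terminal and share code 3.
    fmt.Println(job.StoppedStatus.Compare(job.ErrorStatus))
}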

View File

@ -0,0 +1,748 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package job
import (
"context"
"encoding/json"
"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/common/rds"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
"math/rand"
"strconv"
"time"
)
const (
// Try to keep the job stats data as long as possible, but clear it after a long time anyway
statDataExpireTime = 180 * 24 * 3600
)
// Tracker is designed to track the life cycle of the job described by the stats
// The status change is linear, with a strict predecessor and successor for each status
// Checks should be enforced before switching
//
// Pending is default status when creating job, so no need to switch
type Tracker interface {
// Save the job stats tracked by this tracker to the backend
//
// Return:
// non-nil error returned if any issues happened
Save() error
// Load the backend data into the job stats tracked by this tracker
//
// Return:
// non-nil error returned if any issues happened
Load() error
// Get the job stats tracked by this tracker
//
// Returns:
// *Stats : job stats data
Job() *Stats
// Update the properties of the job stats
//
// fieldAndValues ...interface{} : One or more properties being updated
//
// Returns:
// error if update failed
Update(fieldAndValues ...interface{}) error
// Executions returns the executions of the job tracked by this tracker.
// Note that this is only for periodic jobs.
//
// Returns:
// job execution IDs matched the query
// the total number
// error if any issues happened
Executions(q *query.Parameter) ([]string, int64, error)
// NumericID returns the numeric ID of the periodic job.
// Note that this is only for periodic jobs.
NumericID() (int64, error)
// Mark the periodic job execution as done by updating the score
// of the relation between its periodic policy and execution to -1.
PeriodicExecutionDone() error
// Check in message
CheckIn(message string) error
// Update status with retry enabled
UpdateStatusWithRetry(targetStatus Status) error
// The current status of job
Status() (Status, error)
// Expire the job stats data
Expire() error
// Switch status to running
Run() error
// Switch status to stopped
Stop() error
// Switch the status to error
Fail() error
// Switch the status to success
Succeed() error
}
// basicTracker implements Tracker interface based on redis
type basicTracker struct {
namespace string
context context.Context
pool *redis.Pool
jobID string
jobStats *Stats
callback HookCallback
}
// NewBasicTrackerWithID builds a tracker with the provided job ID
func NewBasicTrackerWithID(
ctx context.Context,
jobID string,
ns string,
pool *redis.Pool,
callback HookCallback,
) Tracker {
return &basicTracker{
namespace: ns,
context: ctx,
pool: pool,
jobID: jobID,
callback: callback,
}
}
// NewBasicTrackerWithStats builds a tracker with the provided job stats
func NewBasicTrackerWithStats(
ctx context.Context,
stats *Stats,
ns string,
pool *redis.Pool,
callback HookCallback,
) Tracker {
return &basicTracker{
namespace: ns,
context: ctx,
pool: pool,
jobStats: stats,
jobID: stats.Info.JobID,
callback: callback,
}
}
// Load refreshes the job stats tracked by this tracker with the backend data
func (bt *basicTracker) Load() error {
return bt.retrieve()
}
// Job returns the job stats tracked by this tracker
func (bt *basicTracker) Job() *Stats {
return bt.jobStats
}
// Update the properties of the job stats
func (bt *basicTracker) Update(fieldAndValues ...interface{}) error {
if len(fieldAndValues) == 0 {
return errors.New("no properties specified to update")
}
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyJobStats(bt.namespace, bt.jobID)
args := []interface{}{"update_time", time.Now().Unix()} // update timestamp
args = append(args, fieldAndValues...)
return rds.HmSet(conn, key, args...)
}
// Status returns the current status of job tracked by this tracker
func (bt *basicTracker) Status() (Status, error) {
	// Retrieve the latest status from the backend in case the in-memory one is outdated.
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
rootKey := rds.KeyJobStats(bt.namespace, bt.jobID)
return getStatus(conn, rootKey)
}
// NumericID returns the numeric ID of the periodic job
func (bt *basicTracker) NumericID() (int64, error) {
if bt.jobStats.Info.NumericPID > 0 {
return bt.jobStats.Info.NumericPID, nil
}
return -1, errors.Errorf("numeric ID not found for job: %s", bt.jobID)
}
// PeriodicExecutionDone marks the execution as done
func (bt *basicTracker) PeriodicExecutionDone() error {
if utils.IsEmptyStr(bt.jobStats.Info.UpstreamJobID) {
return errors.Errorf("%s is not periodic job execution", bt.jobID)
}
key := rds.KeyUpstreamJobAndExecutions(bt.namespace, bt.jobStats.Info.UpstreamJobID)
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
args := []interface{}{key, "XX", -1, bt.jobID}
_, err := conn.Do("ZADD", args...)
return err
}
// Check in message
func (bt *basicTracker) CheckIn(message string) error {
if utils.IsEmptyStr(message) {
return errors.New("check in error: empty message")
}
now := time.Now().Unix()
current := Status(bt.jobStats.Info.Status)
bt.refresh(current, message)
	// Fire the hook event; a failure here should not block persisting the check-in data
	if err := bt.fireHookEvent(current, message); err != nil {
		logger.Errorf("fire hook event for the check in of job %s error: %s", bt.jobID, err)
	}
	return bt.Update(
		"check_in", message,
		"check_in_at", now,
		"update_time", now,
	)
}
// Executions of the tracked job
func (bt *basicTracker) Executions(q *query.Parameter) ([]string, int64, error) {
if bt.jobStats.Info.JobKind != KindPeriodic {
return nil, 0, errors.New("only periodic job has executions")
}
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyUpstreamJobAndExecutions(bt.namespace, bt.jobID)
	// Query only the non-stopped executions if the extra parameter requests it
if nonStoppedOnly, ok := q.Extras.Get(query.ExtraParamKeyNonStoppedOnly); ok {
if v, yes := nonStoppedOnly.(bool); yes && v {
return queryExecutions(conn, key, q)
}
}
// Pagination
var pageNumber, pageSize uint = 1, query.DefaultPageSize
if q != nil {
if q.PageNumber > 0 {
pageNumber = q.PageNumber
}
if q.PageSize > 0 {
pageSize = q.PageSize
}
}
// Get total first
total, err := redis.Int64(conn.Do("ZCARD", key))
if err != nil {
return nil, 0, err
}
// No items
result := make([]string, 0)
if total == 0 || (int64)((pageNumber-1)*pageSize) >= total {
return result, total, nil
}
min, max := (pageNumber-1)*pageSize, pageNumber*pageSize-1
args := []interface{}{key, min, max}
list, err := redis.Values(conn.Do("ZREVRANGE", args...))
if err != nil {
return nil, 0, err
}
for _, item := range list {
if eID, ok := item.([]byte); ok {
result = append(result, string(eID))
}
}
return result, total, nil
}
// Expire job stats
func (bt *basicTracker) Expire() error {
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyJobStats(bt.namespace, bt.jobID)
	// EXPIRE returns an integer reply: 1 if the timeout was set, 0 if the key does not exist
	num, err := redis.Int(conn.Do("EXPIRE", key, statDataExpireTime))
if err != nil {
return err
}
if num == 0 {
return errors.Errorf("job stats for expiring %s does not exist", bt.jobID)
}
return nil
}
// Run switches the job status to running.
// If either the status switch or the hook event fails, a non-nil error is returned.
func (bt *basicTracker) Run() error {
err := bt.compareAndSet(RunningStatus)
if !errs.IsStatusMismatchError(err) {
bt.refresh(RunningStatus)
if er := bt.fireHookEvent(RunningStatus); err == nil && er != nil {
return er
}
}
return err
}
// Stop switches the job status to stopped.
// Stopped is a final status; if the update fails, it is retried.
// If either the status update or the hook event fails, a non-nil error is returned.
func (bt *basicTracker) Stop() error {
err := bt.UpdateStatusWithRetry(StoppedStatus)
if !errs.IsStatusMismatchError(err) {
bt.refresh(StoppedStatus)
if er := bt.fireHookEvent(StoppedStatus); err == nil && er != nil {
return er
}
}
return err
}
// Fail switches the job status to error.
// Error is a final status; if the update fails, it is retried.
// If either the status update or the hook event fails, a non-nil error is returned.
func (bt *basicTracker) Fail() error {
err := bt.UpdateStatusWithRetry(ErrorStatus)
if !errs.IsStatusMismatchError(err) {
bt.refresh(ErrorStatus)
if er := bt.fireHookEvent(ErrorStatus); err == nil && er != nil {
return er
}
}
return err
}
// Succeed switches the job status to success.
// Success is a final status; if the update fails, it is retried.
// If either the status update or the hook event fails, a non-nil error is returned.
func (bt *basicTracker) Succeed() error {
err := bt.UpdateStatusWithRetry(SuccessStatus)
if !errs.IsStatusMismatchError(err) {
bt.refresh(SuccessStatus)
if er := bt.fireHookEvent(SuccessStatus); err == nil && er != nil {
return er
}
}
return err
}
// Save the stats of job tracked by this tracker
func (bt *basicTracker) Save() (err error) {
if bt.jobStats == nil {
return errors.New("nil job stats to save")
}
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
	// Alias of the tracked job stats
stats := bt.jobStats
key := rds.KeyJobStats(bt.namespace, stats.Info.JobID)
args := make([]interface{}, 0)
args = append(args, key)
args = append(args,
"id", stats.Info.JobID,
"name", stats.Info.JobName,
"kind", stats.Info.JobKind,
"unique", stats.Info.IsUnique,
"status", stats.Info.Status,
"ref_link", stats.Info.RefLink,
"enqueue_time", stats.Info.EnqueueTime,
"run_at", stats.Info.RunAt,
"cron_spec", stats.Info.CronSpec,
"web_hook_url", stats.Info.WebHookURL,
"numeric_policy_id", stats.Info.NumericPID,
)
if stats.Info.CheckInAt > 0 && !utils.IsEmptyStr(stats.Info.CheckIn) {
args = append(args,
"check_in", stats.Info.CheckIn,
"check_in_at", stats.Info.CheckInAt,
)
}
if stats.Info.DieAt > 0 {
args = append(args, "die_at", stats.Info.DieAt)
}
if !utils.IsEmptyStr(stats.Info.UpstreamJobID) {
args = append(args, "upstream_job_id", stats.Info.UpstreamJobID)
}
if len(stats.Info.Parameters) > 0 {
if bytes, err := json.Marshal(&stats.Info.Parameters); err == nil {
args = append(args, "parameters", string(bytes))
}
}
// Set update timestamp
args = append(args, "update_time", time.Now().Unix())
// Do it in a transaction
err = conn.Send("MULTI")
err = conn.Send("HMSET", args...)
	// If the job kind is periodic, no expire time is set.
	// If the job kind is scheduled, the expire time is extended by the time left until run_at.
if stats.Info.JobKind != KindPeriodic {
var expireTime int64 = statDataExpireTime
if stats.Info.JobKind == KindScheduled {
nowTime := time.Now().Unix()
future := stats.Info.RunAt - nowTime
if future > 0 {
expireTime += future
}
}
expireTime += rand.Int63n(15) // Avoid lots of keys being expired at the same time
err = conn.Send("EXPIRE", key, expireTime)
}
// Link with its upstream job if upstream job ID exists for future querying
if !utils.IsEmptyStr(stats.Info.UpstreamJobID) {
k := rds.KeyUpstreamJobAndExecutions(bt.namespace, stats.Info.UpstreamJobID)
zargs := []interface{}{k, "NX", stats.Info.RunAt, stats.Info.JobID}
err = conn.Send("ZADD", zargs...)
}
// Check command send error only once here before executing
if err != nil {
return
}
_, err = conn.Do("EXEC")
return
}
// UpdateStatusWithRetry updates the status with retry enabled.
// If updating the status fails, retry if permitted.
// This is best effort only.
func (bt *basicTracker) UpdateStatusWithRetry(targetStatus Status) error {
err := bt.compareAndSet(targetStatus)
if err != nil {
// Status mismatching error will be ignored
if !errs.IsStatusMismatchError(err) {
// Push to the retrying Q
if er := bt.pushToQueueForRetry(targetStatus); er != nil {
logger.Errorf("push job status update request to retry queue error: %s", er)
				// If pushing to the retrying queue failed, downgrade to retrying in the current
				// process by calling recursively in a goroutine.
bt.retryUpdateStatus(targetStatus)
}
}
}
return err
}
// Refresh the job stats in mem
func (bt *basicTracker) refresh(targetStatus Status, checkIn ...string) {
now := time.Now().Unix()
bt.jobStats.Info.Status = targetStatus.String()
if len(checkIn) > 0 {
bt.jobStats.Info.CheckIn = checkIn[0]
bt.jobStats.Info.CheckInAt = now
}
bt.jobStats.Info.UpdateTime = now
}
// fireHookEvent fires the hook event of the status change
func (bt *basicTracker) fireHookEvent(status Status, checkIn ...string) error {
// Check if hook URL is registered
if utils.IsEmptyStr(bt.jobStats.Info.WebHookURL) {
// Do nothing
return nil
}
change := &StatusChange{
JobID: bt.jobID,
Status: status.String(),
Metadata: bt.jobStats.Info,
}
if len(checkIn) > 0 {
change.CheckIn = checkIn[0]
}
// If callback is registered, then trigger now
if bt.callback != nil {
return bt.callback(bt.jobStats.Info.WebHookURL, change)
}
return nil
}
func (bt *basicTracker) pushToQueueForRetry(targetStatus Status) error {
simpleStatusChange := &SimpleStatusChange{
JobID: bt.jobID,
TargetStatus: targetStatus.String(),
}
rawJSON, err := json.Marshal(simpleStatusChange)
if err != nil {
return err
}
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyStatusUpdateRetryQueue(bt.namespace)
args := []interface{}{key, "NX", time.Now().Unix(), rawJSON}
_, err = conn.Do("ZADD", args...)
return err
}
func (bt *basicTracker) retryUpdateStatus(targetStatus Status) {
go func() {
select {
case <-time.After(time.Duration(5)*time.Minute + time.Duration(rand.Int31n(13))*time.Second):
// Check the update timestamp
if time.Now().Unix()-bt.jobStats.Info.UpdateTime < statDataExpireTime-24*3600 {
if err := bt.compareAndSet(targetStatus); err != nil {
logger.Errorf("Retry to update job status error: %s", err)
bt.retryUpdateStatus(targetStatus)
}
// Success
}
return
case <-bt.context.Done():
return // terminated
}
}()
}
func (bt *basicTracker) compareAndSet(targetStatus Status) error {
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
rootKey := rds.KeyJobStats(bt.namespace, bt.jobID)
st, err := getStatus(conn, rootKey)
if err != nil {
return err
}
diff := st.Compare(targetStatus)
if diff > 0 {
return errs.StatusMismatchError(st.String(), targetStatus.String())
}
if diff == 0 {
// Desired matches actual
return nil
}
return setStatus(conn, rootKey, targetStatus)
}
// retrieve the stats of job tracked by this tracker from the backend data
func (bt *basicTracker) retrieve() error {
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyJobStats(bt.namespace, bt.jobID)
vals, err := redis.Strings(conn.Do("HGETALL", key))
if err != nil {
return err
}
	if len(vals) == 0 {
return errs.NoObjectFoundError(bt.jobID)
}
res := &Stats{
Info: &StatsInfo{},
}
	for i, l := 0, len(vals); i < l; i += 2 {
		prop := vals[i]
		value := vals[i+1]
		switch prop {
		case "id":
			res.Info.JobID = value
		case "name":
			res.Info.JobName = value
		case "kind":
			res.Info.JobKind = value
		case "unique":
			v, err := strconv.ParseBool(value)
			if err != nil {
				v = false
			}
			res.Info.IsUnique = v
		case "status":
			res.Info.Status = value
		case "ref_link":
			res.Info.RefLink = value
		case "enqueue_time":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.EnqueueTime = v
		case "update_time":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.UpdateTime = v
		case "run_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.RunAt = v
		case "check_in_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.CheckInAt = v
		case "check_in":
			res.Info.CheckIn = value
		case "cron_spec":
			res.Info.CronSpec = value
		case "web_hook_url":
			res.Info.WebHookURL = value
		case "die_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.DieAt = v
		case "upstream_job_id":
			res.Info.UpstreamJobID = value
		case "numeric_policy_id":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.NumericPID = v
		case "parameters":
			params := make(Parameters)
			if err := json.Unmarshal([]byte(value), &params); err == nil {
				res.Info.Parameters = params
			}
		}
	}
bt.jobStats = res
return nil
}
func getStatus(conn redis.Conn, key string) (Status, error) {
values, err := rds.HmGet(conn, key, "status")
if err != nil {
return "", err
}
if len(values) == 1 {
st := Status(values[0].([]byte))
if st.Validate() == nil {
return st, nil
}
}
return "", errors.New("malformed status data returned")
}
func setStatus(conn redis.Conn, key string, status Status) error {
return rds.HmSet(conn, key, "status", status.String(), "update_time", time.Now().Unix())
}
// queryExecutions queries periodic executions by status
func queryExecutions(conn redis.Conn, dataKey string, q *query.Parameter) ([]string, int64, error) {
total, err := redis.Int64(conn.Do("ZCOUNT", dataKey, 0, "+inf"))
if err != nil {
return nil, 0, err
}
var pageNumber, pageSize uint = 1, query.DefaultPageSize
if q.PageNumber > 0 {
pageNumber = q.PageNumber
}
if q.PageSize > 0 {
pageSize = q.PageSize
}
results := make([]string, 0)
if total == 0 || (int64)((pageNumber-1)*pageSize) >= total {
return results, total, nil
}
offset := (pageNumber - 1) * pageSize
args := []interface{}{dataKey, "+inf", 0, "LIMIT", offset, pageSize}
eIDs, err := redis.Values(conn.Do("ZREVRANGEBYSCORE", args...))
if err != nil {
return nil, 0, err
}
for _, eID := range eIDs {
if eIDBytes, ok := eID.([]byte); ok {
results = append(results, string(eIDBytes))
}
}
return results, total, nil
}
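// Note on the backing data layout used by the functions in this file (see Save,
// PeriodicExecutionDone and queryExecutions above):
//   - the stats of a single job live in the hash rds.KeyJobStats(ns, jobID);
//   - the executions of a periodic job live in the sorted set
//     rds.KeyUpstreamJobAndExecutions(ns, upstreamJobID), where each member is an
//     execution ID scored with its run_at timestamp (ZADD NX in Save) and flipped
//     to -1 once done (ZADD XX in PeriodicExecutionDone), so "ZCOUNT key 0 +inf"
//     counts only the executions that are not done yet.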

View File

@ -0,0 +1,218 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package job
import (
"context"
"testing"
"time"
"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/tests"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
// TrackerTestSuite tests functions of tracker
type TrackerTestSuite struct {
suite.Suite
namespace string
pool *redis.Pool
}
// TestTrackerTestSuite is entry of go test
func TestTrackerTestSuite(t *testing.T) {
suite.Run(t, new(TrackerTestSuite))
}
// SetupSuite prepares test suite
func (suite *TrackerTestSuite) SetupSuite() {
suite.namespace = tests.GiveMeTestNamespace()
suite.pool = tests.GiveMeRedisPool()
}
// TearDownSuite clears the test suite
func (suite *TrackerTestSuite) TearDownSuite() {
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()
_ = tests.ClearAll(suite.namespace, conn)
}
// TestTracker tests tracker
func (suite *TrackerTestSuite) TestTracker() {
jobID := utils.MakeIdentifier()
mockJobStats := &Stats{
Info: &StatsInfo{
JobID: jobID,
Status: SuccessStatus.String(),
JobKind: KindGeneric,
JobName: SampleJob,
IsUnique: false,
},
}
tracker := NewBasicTrackerWithStats(
context.TODO(),
mockJobStats,
suite.namespace,
suite.pool,
func(hookURL string, change *StatusChange) error {
return nil
},
)
err := tracker.Save()
require.Nil(suite.T(), err, "save: nil error expected but got %s", err)
s, err := tracker.Status()
assert.Nil(suite.T(), err, "get status: nil error expected but got %s", err)
	assert.Equal(suite.T(), SuccessStatus, s, "get status: expected success but got %s", s)
j := tracker.Job()
assert.Equal(suite.T(), jobID, j.Info.JobID, "job: expect job ID %s but got %s", jobID, j.Info.JobID)
err = tracker.Update("web_hook_url", "http://hook.url")
assert.Nil(suite.T(), err, "update: nil error expected but got %s", err)
err = tracker.Load()
assert.Nil(suite.T(), err, "load: nil error expected but got %s", err)
assert.Equal(
suite.T(),
"http://hook.url",
tracker.Job().Info.WebHookURL,
"web hook: expect %s but got %s",
"http://hook.url",
tracker.Job().Info.WebHookURL,
)
err = tracker.Run()
assert.Error(suite.T(), err, "run: non nil error expected but got nil")
err = tracker.CheckIn("check in")
assert.Nil(suite.T(), err, "check in: nil error expected but got %s", err)
err = tracker.Succeed()
assert.Nil(suite.T(), err, "succeed: nil error expected but got %s", err)
err = tracker.Stop()
assert.Nil(suite.T(), err, "stop: nil error expected but got %s", err)
err = tracker.Fail()
assert.Nil(suite.T(), err, "fail: nil error expected but got %s", err)
t := NewBasicTrackerWithID(
context.TODO(),
jobID,
suite.namespace,
suite.pool,
func(hookURL string, change *StatusChange) error {
return nil
},
)
err = t.Load()
assert.NoError(suite.T(), err)
err = t.Expire()
assert.NoError(suite.T(), err)
}
// TestPeriodicTracker tests the tracker of a periodic job
func (suite *TrackerTestSuite) TestPeriodicTracker() {
jobID := utils.MakeIdentifier()
nID := time.Now().Unix()
mockJobStats := &Stats{
Info: &StatsInfo{
JobID: jobID,
Status: ScheduledStatus.String(),
JobKind: KindPeriodic,
JobName: SampleJob,
IsUnique: false,
CronSpec: "0 0 * * * *",
NumericPID: nID,
},
}
t := NewBasicTrackerWithStats(context.TODO(), mockJobStats, suite.namespace, suite.pool, nil)
err := t.Save()
require.NoError(suite.T(), err)
executionID := utils.MakeIdentifier()
runAt := time.Now().Add(1 * time.Hour).Unix()
executionStats := &Stats{
Info: &StatsInfo{
JobID: executionID,
Status: ScheduledStatus.String(),
JobKind: KindScheduled,
JobName: SampleJob,
IsUnique: false,
CronSpec: "0 0 * * * *",
RunAt: runAt,
EnqueueTime: runAt,
UpstreamJobID: jobID,
},
}
t2 := NewBasicTrackerWithStats(context.TODO(), executionStats, suite.namespace, suite.pool, nil)
err = t2.Save()
require.NoError(suite.T(), err)
id, err := t.NumericID()
require.NoError(suite.T(), err)
assert.Equal(suite.T(), nID, id)
_, total, err := t.Executions(&query.Parameter{
PageNumber: 1,
PageSize: 10,
Extras: make(query.ExtraParameters),
})
require.NoError(suite.T(), err)
assert.Equal(suite.T(), int64(1), total)
err = t2.PeriodicExecutionDone()
require.NoError(suite.T(), err)
}
// TestPushForRetry tests push for retry
func (suite *TrackerTestSuite) TestPushForRetry() {
ID := utils.MakeIdentifier()
runAt := time.Now().Add(1 * time.Hour).Unix()
jobStats := &Stats{
Info: &StatsInfo{
JobID: ID,
Status: ScheduledStatus.String(),
JobKind: KindScheduled,
JobName: SampleJob,
IsUnique: false,
RunAt: runAt,
EnqueueTime: runAt,
},
}
t := &basicTracker{
namespace: suite.namespace,
context: context.TODO(),
pool: suite.pool,
jobID: ID,
jobStats: jobStats,
callback: nil,
}
err := t.pushToQueueForRetry(RunningStatus)
require.NoError(suite.T(), err)
}

View File

@ -12,11 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package impl
package job
// Define the register name constants of known jobs
const (
// KnownJobDemo is name of demo job
KnownJobDemo = "DEMO"
)
// HookCallback defines a callback to trigger when hook events happened
type HookCallback func(hookURL string, change *StatusChange) error

View File

@ -0,0 +1,179 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lcm
import (
"context"
"encoding/json"
"github.com/goharbor/harbor/src/jobservice/common/rds"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
"sync"
"time"
)
const (
	// Wait a short while if any error occurred
	shortLoopInterval = 5 * time.Second
	// Wait a longer while if no retrying elements are found
	longLoopInterval = 5 * time.Minute
)
// Controller is designed to control the life cycle of the job
type Controller interface {
// Run daemon process if needed
Serve() error
	// New creates a tracker from the provided job stats
New(stats *job.Stats) (job.Tracker, error)
// Track the life cycle of the specified existing job
Track(jobID string) (job.Tracker, error)
}
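// The snippet below is an illustrative sketch only: it shows how a Controller is
// typically wired together. The env.Context, namespace, pool and hook callback
// are assumed to be prepared by the caller; the job stats fields mirror the ones
// used by the test suite and the job ID is a placeholder.
func sketchControllerUsage(ctx *env.Context, ns string, pool *redis.Pool) error {
	ctl := NewController(ctx, ns, pool, func(hookURL string, change *job.StatusChange) error {
		// A no-op hook callback for the sketch
		return nil
	})
	// Start the background loop restoring dead status update requests
	if err := ctl.Serve(); err != nil {
		return err
	}
	// Create a tracker for a brand-new job ...
	t, err := ctl.New(&job.Stats{
		Info: &job.StatsInfo{
			JobID:   "example-job-id",
			JobName: "EXAMPLE",
			JobKind: job.KindGeneric,
			Status:  job.PendingStatus.String(),
		},
	})
	if err != nil {
		return err
	}
	// ... or attach to an existing one by its ID
	_, err = ctl.Track(t.Job().Info.JobID)
	return err
}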
// basicController is default implementation of Controller based on redis
type basicController struct {
context context.Context
namespace string
pool *redis.Pool
callback job.HookCallback
wg *sync.WaitGroup
}
// NewController is the constructor of basic controller
func NewController(ctx *env.Context, ns string, pool *redis.Pool, callback job.HookCallback) Controller {
return &basicController{
context: ctx.SystemContext,
namespace: ns,
pool: pool,
callback: callback,
wg: ctx.WG,
}
}
// Serve ...
func (bc *basicController) Serve() error {
bc.wg.Add(1)
go bc.loopForRestoreDeadStatus()
logger.Info("Status restoring loop is started")
return nil
}
// New creates a tracker with the provided job stats
func (bc *basicController) New(stats *job.Stats) (job.Tracker, error) {
if stats == nil {
return nil, errors.New("nil stats when creating job tracker")
}
if err := stats.Validate(); err != nil {
return nil, errors.Errorf("error occurred when creating job tracker: %s", err)
}
bt := job.NewBasicTrackerWithStats(bc.context, stats, bc.namespace, bc.pool, bc.callback)
if err := bt.Save(); err != nil {
return nil, err
}
return bt, nil
}
// Track and attach to the existing job with the specified ID
func (bc *basicController) Track(jobID string) (job.Tracker, error) {
bt := job.NewBasicTrackerWithID(bc.context, jobID, bc.namespace, bc.pool, bc.callback)
if err := bt.Load(); err != nil {
return nil, err
}
return bt, nil
}
// loopForRestoreDeadStatus is a loop to restore the dead states of jobs
func (bc *basicController) loopForRestoreDeadStatus() {
defer func() {
logger.Info("Status restoring loop is stopped")
bc.wg.Done()
}()
token := make(chan bool, 1)
token <- true
for {
<-token
if err := bc.restoreDeadStatus(); err != nil {
waitInterval := shortLoopInterval
if err == rds.ErrNoElements {
// No elements
waitInterval = longLoopInterval
} else {
logger.Errorf("restore dead status error: %s, put it back to the retrying Q later again", err)
}
// wait for a while or be terminated
select {
case <-time.After(waitInterval):
case <-bc.context.Done():
return
}
}
// Return token
token <- true
}
}
// restoreDeadStatus tries to restore the dead status
func (bc *basicController) restoreDeadStatus() error {
// Get one
deadOne, err := bc.popOneDead()
if err != nil {
return err
}
// Try to update status
t, err := bc.Track(deadOne.JobID)
if err != nil {
return err
}
return t.UpdateStatusWithRetry(job.Status(deadOne.TargetStatus))
}
// popOneDead retrieves one dead status from the backend Q from lowest to highest
func (bc *basicController) popOneDead() (*job.SimpleStatusChange, error) {
conn := bc.pool.Get()
defer func() {
_ = conn.Close()
}()
key := rds.KeyStatusUpdateRetryQueue(bc.namespace)
v, err := rds.ZPopMin(conn, key)
if err != nil {
return nil, err
}
if bytes, ok := v.([]byte); ok {
ssc := &job.SimpleStatusChange{}
if err := json.Unmarshal(bytes, ssc); err == nil {
return ssc, nil
}
}
return nil, errors.New("pop one dead error: bad result reply")
}
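// Note on the retry queue shared with the tracker (see pushToQueueForRetry in the
// job package and TestServe in the tests): members of the sorted set
// rds.KeyStatusUpdateRetryQueue(ns) are JSON-encoded job.SimpleStatusChange values
// scored with the enqueue timestamp, and popOneDead consumes them from the lowest
// score to the highest via rds.ZPopMin.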

View File

@ -0,0 +1,123 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lcm
import (
"context"
"encoding/json"
"github.com/goharbor/harbor/src/jobservice/common/rds"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/tests"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"sync"
"testing"
"time"
)
// LcmControllerTestSuite tests functions of life cycle controller
type LcmControllerTestSuite struct {
suite.Suite
namespace string
pool *redis.Pool
ctl Controller
cancel context.CancelFunc
}
// SetupSuite prepares test suite
func (suite *LcmControllerTestSuite) SetupSuite() {
suite.namespace = tests.GiveMeTestNamespace()
suite.pool = tests.GiveMeRedisPool()
ctx, cancel := context.WithCancel(context.Background())
suite.cancel = cancel
envCtx := &env.Context{
SystemContext: ctx,
WG: new(sync.WaitGroup),
}
suite.ctl = NewController(envCtx, suite.namespace, suite.pool, func(hookURL string, change *job.StatusChange) error { return nil })
}
// TearDownSuite clears test suite
func (suite *LcmControllerTestSuite) TearDownSuite() {
suite.cancel()
}
// TestLcmControllerTestSuite is entry of go test
func TestLcmControllerTestSuite(t *testing.T) {
suite.Run(t, new(LcmControllerTestSuite))
}
// TestNewAndTrack tests controller.New() and controller.Track()
func (suite *LcmControllerTestSuite) TestNewAndTrack() {
jobID := utils.MakeIdentifier()
suite.newsStats(jobID)
t, err := suite.ctl.Track(jobID)
require.Nil(suite.T(), err, "lcm track: nil error expected but got %s", err)
assert.Equal(suite.T(), job.SampleJob, t.Job().Info.JobName, "lcm new: expect job name %s but got %s", job.SampleJob, t.Job().Info.JobName)
}
// TestServe tests controller.Serve()
func (suite *LcmControllerTestSuite) TestServe() {
// Prepare mock data
jobID := utils.MakeIdentifier()
suite.newsStats(jobID)
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()
simpleChange := &job.SimpleStatusChange{
JobID: jobID,
TargetStatus: job.RunningStatus.String(),
}
rawJSON, err := json.Marshal(simpleChange)
require.Nil(suite.T(), err, "json marshal: nil error expected but got %s", err)
key := rds.KeyStatusUpdateRetryQueue(suite.namespace)
args := []interface{}{key, "NX", time.Now().Unix(), rawJSON}
_, err = conn.Do("ZADD", args...)
require.Nil(suite.T(), err, "prepare mock data: nil error expected but got %s", err)
err = suite.ctl.Serve()
require.NoError(suite.T(), err, "lcm: nil error expected but got %s", err)
<-time.After(1 * time.Second)
count, err := redis.Int(conn.Do("ZCARD", key))
require.Nil(suite.T(), err, "get total dead status: nil error expected but got %s", err)
assert.Equal(suite.T(), 0, count)
}
// newsStats creates job stats
func (suite *LcmControllerTestSuite) newsStats(jobID string) {
stats := &job.Stats{
Info: &job.StatsInfo{
JobID: jobID,
JobKind: job.KindGeneric,
JobName: job.SampleJob,
IsUnique: true,
Status: job.PendingStatus.String(),
},
}
t, err := suite.ctl.New(stats)
require.Nil(suite.T(), err, "lcm new: nil error expected but got %s", err)
assert.Equal(suite.T(), jobID, t.Job().Info.JobID, "lcm new: expect job ID %s but got %s", jobID, t.Job().Info.JobID)
}

View File

@ -49,14 +49,15 @@ func TestDBLogger(t *testing.T) {
l.Warningf("JobLog Warningf: %s", "TestDBLogger")
l.Errorf("JobLog Errorf: %s", "TestDBLogger")
l.Close()
_ = l.Close()
dbGetter := getter.NewDBGetter()
ll, err := dbGetter.Retrieve(uuid)
require.Nil(t, err)
log.Infof("get logger %s", ll)
sweeper.PrepareDBSweep()
err = sweeper.PrepareDBSweep()
require.NoError(t, err)
dbSweeper := sweeper.NewDBSweeper(-1)
count, err := dbSweeper.Sweep()
require.Nil(t, err)

View File

@ -38,12 +38,12 @@ func GetLogger(loggerOptions ...Option) (Interface, error) {
// No options specified, enable std as default
if len(loggerOptions) == 0 {
defaultOp := BackendOption(LoggerNameStdOutput, "", nil)
defaultOp := BackendOption(NameStdOutput, "", nil)
defaultOp.Apply(lOptions)
}
// Create backends
loggers := []Interface{}
loggers := make([]Interface, 0)
for name, ops := range lOptions.values {
if !IsKnownLogger(name) {
return nil, fmt.Errorf("no logger registered for name '%s'", name)
@ -105,7 +105,7 @@ func GetSweeper(context context.Context, sweeperOptions ...Option) (sweeper.Inte
op.Apply(sOptions)
}
sweepers := []sweeper.Interface{}
sweepers := make([]sweeper.Interface, 0)
for name, ops := range sOptions.values {
if !HasSweeper(name) {
return nil, fmt.Errorf("no sweeper provided for the logger %s", name)
@ -147,7 +147,7 @@ func GetLogDataGetter(loggerOptions ...Option) (getter.Interface, error) {
}
// Iterate with specified order
keys := []string{}
keys := make([]string, 0)
for k := range lOptions.values {
keys = append(keys, k)
}
@ -175,14 +175,14 @@ func GetLogDataGetter(loggerOptions ...Option) (getter.Interface, error) {
// Init the loggers and sweepers
func Init(ctx context.Context) error {
// For loggers
options := []Option{}
options := make([]Option, 0)
// For sweepers
sOptions := []Option{}
sOptions := make([]Option, 0)
for _, lc := range config.DefaultConfig.LoggerConfigs {
// Inject logger depth here for FILE and STD logger to avoid configuring it in the yaml
// For logger of job service itself, the depth should be 6
if lc.Name == LoggerNameFile || lc.Name == LoggerNameStdOutput {
if lc.Name == NameFile || lc.Name == NameStdOutput {
if lc.Settings == nil {
lc.Settings = map[string]interface{}{}
}
@ -202,7 +202,7 @@ func Init(ctx context.Context) error {
// Avoid data race issue
singletons.Store(systemKeyServiceLogger, lg)
jOptions := []Option{}
jOptions := make([]Option, 0)
// Append configured sweepers in job loggers if existing
for _, lc := range config.DefaultConfig.JobLoggerConfigs {
jOptions = append(jOptions, BackendOption(lc.Name, lc.Level, lc.Settings))
@ -224,12 +224,12 @@ func Init(ctx context.Context) error {
// If sweepers configured
if len(sOptions) > 0 {
// Get the sweeper controller
sweeper, err := GetSweeper(ctx, sOptions...)
swp, err := GetSweeper(ctx, sOptions...)
if err != nil {
return fmt.Errorf("create logger sweeper error: %s", err)
}
// Start sweep loop
_, err = sweeper.Sweep()
_, err = swp.Sweep()
if err != nil {
return fmt.Errorf("start logger sweeper error: %s", err)
}

View File

@ -74,7 +74,7 @@ func TestGetLoggersMulti(t *testing.T) {
}
}()
ops := []Option{}
ops := make([]Option, 0)
ops = append(
ops,
BackendOption("STD_OUTPUT", "DEBUG", nil),

View File

@ -17,7 +17,7 @@ func TestFileFactory(t *testing.T) {
require.Nil(t, err)
if closer, ok := ff.(Closer); ok {
closer.Close()
_ = closer.Close()
}
}

View File

@ -39,14 +39,16 @@ func TestDBGetter(t *testing.T) {
require.Nil(t, err)
l.Debug("JobLog Debug: TestDBLoggerGetter")
l.Close()
err = l.Close()
require.NoError(t, err)
dbGetter := NewDBGetter()
ll, err := dbGetter.Retrieve(uuid)
require.Nil(t, err)
log.Infof("get logger %s", ll)
sweeper.PrepareDBSweep()
err = sweeper.PrepareDBSweep()
require.NoError(t, err)
dbSweeper := sweeper.NewDBSweeper(-1)
count, err := dbSweeper.Sweep()
require.Nil(t, err)
@ -60,7 +62,8 @@ func TestDBGetterError(t *testing.T) {
require.Nil(t, err)
l.Debug("JobLog Debug: TestDBLoggerGetter")
l.Close()
err = l.Close()
require.NoError(t, err)
dbGetter := NewDBGetter()
_, err = dbGetter.Retrieve("")
@ -68,7 +71,8 @@ func TestDBGetterError(t *testing.T) {
_, err = dbGetter.Retrieve("not_exist_uuid")
require.NotNil(t, err)
sweeper.PrepareDBSweep()
err = sweeper.PrepareDBSweep()
require.NoError(t, err)
dbSweeper := sweeper.NewDBSweeper(-1)
count, err := dbSweeper.Sweep()
require.Nil(t, err)

View File

@ -8,7 +8,7 @@ import (
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/utils"
"github.com/goharbor/harbor/src/jobservice/common/utils"
)
// FileGetter is responsible for retrieving file log data

View File

@ -7,12 +7,12 @@ import (
)
const (
// LoggerNameFile is unique name of the file logger.
LoggerNameFile = "FILE"
// LoggerNameStdOutput is the unique name of the std logger.
LoggerNameStdOutput = "STD_OUTPUT"
// LoggerNameDB is the unique name of the DB logger.
LoggerNameDB = "DB"
// NameFile is unique name of the file logger.
NameFile = "FILE"
// NameStdOutput is the unique name of the std logger.
NameStdOutput = "STD_OUTPUT"
// NameDB is the unique name of the DB logger.
NameDB = "DB"
)
// Declaration is used to declare a supported logger.
@ -31,11 +31,11 @@ type Declaration struct {
// log info.
var knownLoggers = map[string]*Declaration{
// File logger
LoggerNameFile: {FileFactory, FileSweeperFactory, FileGetterFactory, false},
NameFile: {FileFactory, FileSweeperFactory, FileGetterFactory, false},
// STD output(both stdout and stderr) logger
LoggerNameStdOutput: {StdFactory, nil, nil, true},
NameStdOutput: {StdFactory, nil, nil, true},
// DB logger
LoggerNameDB: {DBFactory, DBSweeperFactory, DBGetterFactory, false},
NameDB: {DBFactory, DBSweeperFactory, DBGetterFactory, false},
}
// IsKnownLogger checks if the logger is supported with name.
@ -97,11 +97,11 @@ func GetLoggerName(l Interface) string {
switch l.(type) {
case *backend.DBLogger:
name = LoggerNameDB
name = NameDB
case *backend.StdOutputLogger:
name = LoggerNameStdOutput
name = NameStdOutput
case *backend.FileLogger:
name = LoggerNameFile
name = NameFile
default:
name = reflect.TypeOf(l).String()
}

View File

@ -13,28 +13,28 @@ func TestKnownLoggers(t *testing.T) {
b := IsKnownLogger("Unknown")
require.False(t, b)
b = IsKnownLogger(LoggerNameFile)
b = IsKnownLogger(NameFile)
require.True(t, b)
// no getter
b = HasGetter(LoggerNameStdOutput)
b = HasGetter(NameStdOutput)
require.False(t, b)
// has getter
b = HasGetter(LoggerNameDB)
b = HasGetter(NameDB)
require.True(t, b)
// no sweeper
b = HasSweeper(LoggerNameStdOutput)
b = HasSweeper(NameStdOutput)
require.False(t, b)
// has sweeper
b = HasSweeper(LoggerNameDB)
b = HasSweeper(NameDB)
require.True(t, b)
// unknown logger
l := KnownLoggers("unknown")
require.Nil(t, l)
// known logger
l = KnownLoggers(LoggerNameDB)
l = KnownLoggers(NameDB)
require.NotNil(t, l)
// unknown level
@ -52,14 +52,14 @@ func TestGetLoggerName(t *testing.T) {
uuid := "uuid_for_unit_test"
l, err := backend.NewDBLogger(uuid, "DEBUG", 4)
require.Nil(t, err)
require.Equal(t, LoggerNameDB, GetLoggerName(l))
require.Equal(t, NameDB, GetLoggerName(l))
stdLog := backend.NewStdOutputLogger("DEBUG", backend.StdErr, 4)
require.Equal(t, LoggerNameStdOutput, GetLoggerName(stdLog))
require.Equal(t, NameStdOutput, GetLoggerName(stdLog))
fileLog, err := backend.NewFileLogger("DEBUG", path.Join(os.TempDir(), "TestFileLogger.log"), 4)
require.Nil(t, err)
require.Equal(t, LoggerNameFile, GetLoggerName(fileLog))
require.Equal(t, NameFile, GetLoggerName(fileLog))
e := &Entry{}
n := GetLoggerName(e)

View File

@ -15,9 +15,3 @@ func Retrieve(logID string) ([]byte, error) {
return val.(getter.Interface).Retrieve(logID)
}
// HasLogGetterConfigured checks if a log data getter is there for using
func HasLogGetterConfigured() bool {
_, ok := singletons.Load(systemKeyLogDataGetter)
return ok
}

View File

@ -0,0 +1,50 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logger
import (
"context"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/stretchr/testify/require"
"os"
"testing"
)
func TestRetrieve(t *testing.T) {
config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
{
Name: "STD_OUTPUT",
Level: "DEBUG",
},
{
Name: "FILE",
Level: "INFO",
Settings: map[string]interface{}{
"base_dir": os.TempDir(),
},
Sweeper: &config.LogSweeperConfig{
Duration: 1,
Settings: map[string]interface{}{
"work_dir": os.TempDir(),
},
},
},
}
err := Init(context.TODO())
require.NoError(t, err)
_, err = Retrieve("no_id")
require.Error(t, err)
}

View File

@ -38,9 +38,11 @@ func TestDBGetter(t *testing.T) {
require.Nil(t, err)
l.Debug("JobLog Debug: TestDBLoggerSweeper")
l.Close()
err = l.Close()
require.NoError(t, err)
PrepareDBSweep()
err = PrepareDBSweep()
require.NoError(t, err)
dbSweeper := NewDBSweeper(-1)
count, err := dbSweeper.Sweep()
require.Nil(t, err)

View File

@ -16,19 +16,19 @@ package main
import (
"context"
"errors"
"flag"
"fmt"
"github.com/goharbor/harbor/src/common"
comcfg "github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl"
"github.com/pkg/errors"
"os"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/jobservice/runtime"
"github.com/goharbor/harbor/src/jobservice/utils"
"os"
)
func main() {
@ -47,8 +47,10 @@ func main() {
panic(fmt.Sprintf("load configurations error: %s\n", err))
}
// Append node ID
vCtx := context.WithValue(context.Background(), utils.NodeID, utils.GenerateNodeID())
// Create the root context
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(vCtx)
defer cancel()
// Initialize logger
@ -57,7 +59,7 @@ func main() {
}
// Set job context initializer
runtime.JobService.SetJobContextInitializer(func(ctx *env.Context) (env.JobContext, error) {
runtime.JobService.SetJobContextInitializer(func(ctx context.Context) (job.Context, error) {
secret := config.GetAuthSecret()
if utils.IsEmptyStr(secret) {
return nil, errors.New("empty auth secret")
@ -65,7 +67,7 @@ func main() {
coreURL := os.Getenv("CORE_URL")
configURL := coreURL + common.CoreConfigPath
cfgMgr := comcfg.NewRESTCfgManager(configURL, secret)
jobCtx := impl.NewContext(ctx.SystemContext, cfgMgr)
jobCtx := impl.NewContext(ctx, cfgMgr)
if err := jobCtx.Init(); err != nil {
return nil, err
@ -75,5 +77,7 @@ func main() {
})
// Start
runtime.JobService.LoadAndRun(ctx, cancel)
if err := runtime.JobService.LoadAndRun(ctx, cancel); err != nil {
logger.Fatal(err)
}
}

View File

@ -1,99 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package models
// Parameters for job execution.
type Parameters map[string]interface{}
// JobRequest is the request of launching a job.
type JobRequest struct {
Job *JobData `json:"job"`
}
// JobData keeps the basic info.
type JobData struct {
Name string `json:"name"`
Parameters Parameters `json:"parameters"`
Metadata *JobMetadata `json:"metadata"`
StatusHook string `json:"status_hook"`
}
// JobMetadata stores the metadata of job.
type JobMetadata struct {
JobKind string `json:"kind"`
ScheduleDelay uint64 `json:"schedule_delay,omitempty"`
Cron string `json:"cron_spec,omitempty"`
IsUnique bool `json:"unique"`
}
// JobStats keeps the result of job launching.
type JobStats struct {
Stats *JobStatData `json:"job"`
}
// JobStatData keeps the stats of job
type JobStatData struct {
JobID string `json:"id"`
Status string `json:"status"`
JobName string `json:"name"`
JobKind string `json:"kind"`
IsUnique bool `json:"unique"`
RefLink string `json:"ref_link,omitempty"`
CronSpec string `json:"cron_spec,omitempty"`
EnqueueTime int64 `json:"enqueue_time"`
UpdateTime int64 `json:"update_time"`
RunAt int64 `json:"run_at,omitempty"`
CheckIn string `json:"check_in,omitempty"`
CheckInAt int64 `json:"check_in_at,omitempty"`
DieAt int64 `json:"die_at,omitempty"`
HookStatus string `json:"hook_status,omitempty"`
Executions []string `json:"executions,omitempty"` // For the jobs like periodic jobs, which may execute multiple times
UpstreamJobID string `json:"upstream_job_id,omitempty"` // Ref the upstream job if existing
IsMultipleExecutions bool `json:"multiple_executions"` // Indicate if the job has subsequent executions
}
// JobPoolStats represents the healthy and status of all the running worker pools.
type JobPoolStats struct {
Pools []*JobPoolStatsData `json:"worker_pools"`
}
// JobPoolStatsData represent the healthy and status of the worker pool.
type JobPoolStatsData struct {
WorkerPoolID string `json:"worker_pool_id"`
StartedAt int64 `json:"started_at"`
HeartbeatAt int64 `json:"heartbeat_at"`
JobNames []string `json:"job_names"`
Concurrency uint `json:"concurrency"`
Status string `json:"status"`
}
// JobActionRequest defines for triggering job action like stop/cancel.
type JobActionRequest struct {
Action string `json:"action"`
}
// JobStatusChange is designed for reporting the status change via hook.
type JobStatusChange struct {
JobID string `json:"job_id"`
Status string `json:"status"`
CheckIn string `json:"check_in,omitempty"`
Metadata *JobStatData `json:"metadata,omitempty"`
}
// Message is designed for sub/pub messages
type Message struct {
Event string
Data interface{} // generic format
}

View File

@ -1,107 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opm
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/goharbor/harbor/src/jobservice/models"
"github.com/goharbor/harbor/src/jobservice/utils"
)
const (
clientTimeout = 10 * time.Second
maxIdleConnections = 20
idleConnectionTimeout = 30 * time.Second
)
// DefaultHookClient is for default use.
var DefaultHookClient = NewHookClient()
// HookClient is used to post the related data to the interested parties.
type HookClient struct {
client *http.Client
}
// NewHookClient return the ptr of the new HookClient
func NewHookClient() *HookClient {
client := &http.Client{
Timeout: clientTimeout,
Transport: &http.Transport{
MaxIdleConns: maxIdleConnections,
IdleConnTimeout: idleConnectionTimeout,
},
}
return &HookClient{
client: client,
}
}
// ReportStatus reports the status change info to the subscribed party.
// The status includes 'checkin' info with format 'check_in:<message>'
func (hc *HookClient) ReportStatus(hookURL string, status models.JobStatusChange) error {
if utils.IsEmptyStr(hookURL) {
return errors.New("empty hook url") // do nothing
}
// Parse and validate URL
url, err := url.Parse(hookURL)
if err != nil {
return err
}
// Marshal data
data, err := json.Marshal(&status)
if err != nil {
return err
}
// New post request
req, err := http.NewRequest(http.MethodPost, url.String(), strings.NewReader(string(data)))
if err != nil {
return err
}
res, err := hc.client.Do(req)
if err != nil {
return err
}
defer res.Body.Close() // close connection for reuse
// Should be 200
if res.StatusCode != http.StatusOK {
if res.ContentLength > 0 {
// read error content and return
dt, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
return errors.New(string(dt))
}
return fmt.Errorf("failed to report status change via hook, expect '200' but got '%d'", res.StatusCode)
}
return nil
}

View File

@ -1,54 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opm
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/goharbor/harbor/src/jobservice/models"
)
func TestHookClient(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "ok")
}))
defer ts.Close()
err := DefaultHookClient.ReportStatus(ts.URL, models.JobStatusChange{
JobID: "fake_job_ID",
Status: "running",
})
if err != nil {
t.Fatal(err)
}
}
func TestReportStatusFailed(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte("failed"))
}))
defer ts.Close()
err := DefaultHookClient.ReportStatus(ts.URL, models.JobStatusChange{
JobID: "fake_job_ID",
Status: "running",
})
if err == nil {
t.Fatal("expect error but got nil")
}
}

View File

@ -1,69 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opm
import (
"sync"
"github.com/goharbor/harbor/src/jobservice/utils"
)
// HookStore is used to cache the hooks in memory.
// Use job ID as key to index
type HookStore struct {
lock *sync.RWMutex
data map[string]string
}
// NewHookStore is to create a ptr of new HookStore.
func NewHookStore() *HookStore {
return &HookStore{
lock: new(sync.RWMutex),
data: make(map[string]string),
}
}
// Add new record
func (hs *HookStore) Add(jobID string, hookURL string) {
if utils.IsEmptyStr(jobID) {
return // do nothing
}
hs.lock.Lock()
defer hs.lock.Unlock()
hs.data[jobID] = hookURL
}
// Get one hook url by job ID
func (hs *HookStore) Get(jobID string) (string, bool) {
hs.lock.RLock()
defer hs.lock.RUnlock()
hookURL, ok := hs.data[jobID]
return hookURL, ok
}
// Remove the specified one
func (hs *HookStore) Remove(jobID string) (string, bool) {
hs.lock.Lock()
defer hs.lock.Unlock()
hookURL, ok := hs.data[jobID]
delete(hs.data, jobID)
return hookURL, ok
}

View File

@ -1,137 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opm
import "github.com/goharbor/harbor/src/jobservice/models"
// Range for list scope defining
type Range int
// JobStatsManager defines the methods to handle stats of job.
type JobStatsManager interface {
// Start to serve
Start()
// Shutdown the manager
Shutdown()
// Save the job stats
// Async method to retry and improve performance
//
// jobStats models.JobStats : the job stats to be saved
Save(jobStats models.JobStats)
// Get the job stats from backend store
// Sync method as we need the data
//
// Returns:
// models.JobStats : job stats data
// error : error if meet any problems
Retrieve(jobID string) (models.JobStats, error)
// Update the properties of the job stats
//
// jobID string : ID of the being retried job
// fieldAndValues ...interface{} : One or more properties being updated
//
// Returns:
// error if update failed
Update(jobID string, fieldAndValues ...interface{}) error
// SetJobStatus will mark the status of job to the specified one
// Async method to retry
SetJobStatus(jobID string, status string)
// Send command fro the specified job
//
// jobID string : ID of the being retried job
// command string : the command applied to the job like stop/cancel
// isCached bool : to indicate if only cache the op command
//
// Returns:
// error if it was not successfully sent
SendCommand(jobID string, command string, isCached bool) error
// CtlCommand checks if control command is fired for the specified job.
//
// jobID string : ID of the job
//
// Returns:
// the command if it was fired
// error if it was not fired yet to meet some other problems
CtlCommand(jobID string) (string, error)
// CheckIn message for the specified job like detailed progress info.
//
// jobID string : ID of the job
// message string : The message being checked in
//
CheckIn(jobID string, message string)
// DieAt marks the failed jobs with the time they put into dead queue.
//
// jobID string : ID of the job
// message string : The message being checked in
//
DieAt(jobID string, dieAt int64)
// RegisterHook is used to save the hook url or cache the url in memory.
//
// jobID string : ID of job
// hookURL string : the hook url being registered
// isCached bool : to indicate if only cache the hook url
//
// Returns:
// error if meet any problems
RegisterHook(jobID string, hookURL string, isCached bool) error
// Get hook returns the web hook url for the specified job if it is registered
//
// jobID string : ID of job
//
// Returns:
// the web hook url if existing
// non-nil error if meet any problems
GetHook(jobID string) (string, error)
// Mark the periodic job stats expired
//
// jobID string : ID of job
//
// Returns:
// error if meet any problems
ExpirePeriodicJobStats(jobID string) error
// Persist the links between upstream job and the executions.
//
// upstreamJobID string: ID of the upstream job
// executions ...string: IDs of the execution jobs
//
// Returns:
// error if meet any issues
AttachExecution(upstreamJobID string, executions ...string) error
// Get all the executions (IDs) fro the specified upstream Job.
//
// upstreamJobID string: ID of the upstream job
// ranges ...Range: Define the start and end for the list, e.g:
// 0, 10 means [0:10]
// 10 means [10:]
// empty means [0:-1]==all
// Returns:
// the ID list of the executions if no error occurred
// or a non-nil error is returned
GetExecutions(upstreamJobID string, ranges ...Range) ([]string, error)
}

View File

@ -1,178 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opm
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/jobservice/models"
"github.com/goharbor/harbor/src/jobservice/utils"
"github.com/gomodule/redigo/redis"
)
const (
commandValidTime = 5 * time.Minute
commandSweepTickerTime = 1 * time.Hour
// EventFireCommand for firing command event
EventFireCommand = "fire_command"
)
type oPCommand struct {
command string
fireTime int64
}
// oPCommands maintain commands list
type oPCommands struct {
lock *sync.RWMutex
commands map[string]*oPCommand
context context.Context
redisPool *redis.Pool
namespace string
stopChan chan struct{}
doneChan chan struct{}
}
// newOPCommands is constructor of OPCommands
func newOPCommands(ctx context.Context, ns string, redisPool *redis.Pool) *oPCommands {
return &oPCommands{
lock: new(sync.RWMutex),
commands: make(map[string]*oPCommand),
context: ctx,
redisPool: redisPool,
namespace: ns,
stopChan: make(chan struct{}, 1),
doneChan: make(chan struct{}, 1),
}
}
// Start the command sweeper
func (opc *oPCommands) Start() {
go opc.loop()
logger.Info("OP commands sweeper is started")
}
// Stop the command sweeper
func (opc *oPCommands) Stop() {
opc.stopChan <- struct{}{}
<-opc.doneChan
}
// Fire command
func (opc *oPCommands) Fire(jobID string, command string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")
}
if command != CtlCommandStop && command != CtlCommandCancel {
return fmt.Errorf("Unsupported command %s", command)
}
notification := &models.Message{
Event: EventFireCommand,
Data: []string{jobID, command},
}
rawJSON, err := json.Marshal(notification)
if err != nil {
return err
}
conn := opc.redisPool.Get()
defer conn.Close()
_, err = conn.Do("PUBLISH", utils.KeyPeriodicNotification(opc.namespace), rawJSON)
return err
}
// Push command into the list
func (opc *oPCommands) Push(jobID string, command string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")
}
if command != CtlCommandStop && command != CtlCommandCancel {
return fmt.Errorf("Unsupported command %s", command)
}
opc.lock.Lock()
defer opc.lock.Unlock()
opc.commands[jobID] = &oPCommand{
command: command,
fireTime: time.Now().Unix(),
}
return nil
}
// Pop out the command if existing
func (opc *oPCommands) Pop(jobID string) (string, bool) {
if utils.IsEmptyStr(jobID) {
return "", false
}
opc.lock.RLock()
defer opc.lock.RUnlock()
c, ok := opc.commands[jobID]
if ok {
if time.Unix(c.fireTime, 0).Add(commandValidTime).After(time.Now()) {
delete(opc.commands, jobID)
return c.command, true
}
}
return "", false
}
func (opc *oPCommands) loop() {
defer func() {
logger.Info("OP commands is stopped")
opc.doneChan <- struct{}{}
}()
tk := time.NewTicker(commandSweepTickerTime)
defer tk.Stop()
for {
select {
case <-tk.C:
opc.sweepCommands()
case <-opc.context.Done():
return
case <-opc.stopChan:
return
}
}
}
func (opc *oPCommands) sweepCommands() {
opc.lock.Lock()
defer opc.lock.Unlock()
for k, v := range opc.commands {
if time.Unix(v.fireTime, 0).Add(commandValidTime).After(time.Now()) {
delete(opc.commands, k)
}
}
}

View File

@ -1,826 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opm
import (
"context"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"strconv"
"sync/atomic"
"time"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/models"
"github.com/goharbor/harbor/src/jobservice/utils"
"github.com/gomodule/redigo/redis"
)
const (
processBufferSize = 1024
opSaveStats = "save_job_stats"
opUpdateStatus = "update_job_status"
opCheckIn = "check_in"
opDieAt = "mark_die_at"
opReportStatus = "report_status"
opPersistExecutions = "persist_executions"
opUpdateStats = "update_job_stats"
maxFails = 3
jobStatsDataExpireTime = 60 * 60 * 24 * 5 // 5 days
// CtlCommandStop : command stop
CtlCommandStop = "stop"
// CtlCommandCancel : command cancel
CtlCommandCancel = "cancel"
// CtlCommandRetry : command retry
CtlCommandRetry = "retry"
// EventRegisterStatusHook is event name of registering hook
EventRegisterStatusHook = "register_hook"
)
type queueItem struct {
Op string
Fails uint
Data interface{}
}
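// string returns the JSON representation of the queue item, falling back to default formatting if marshaling fails.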
func (qi *queueItem) string() string {
data, err := json.Marshal(qi)
if err != nil {
return fmt.Sprintf("%v", qi)
}
return string(data)
}
// RedisJobStatsManager implements JobStatsManager based on redis.
type RedisJobStatsManager struct {
namespace string
redisPool *redis.Pool
context context.Context
stopChan chan struct{}
doneChan chan struct{}
processChan chan *queueItem
isRunning *atomic.Value
hookStore *HookStore // cache the hook here to avoid requesting backend
opCommands *oPCommands // maintain the OP commands
}
// NewRedisJobStatsManager is the constructor of RedisJobStatsManager
func NewRedisJobStatsManager(ctx context.Context, namespace string, redisPool *redis.Pool) JobStatsManager {
isRunning := &atomic.Value{}
isRunning.Store(false)
return &RedisJobStatsManager{
namespace: namespace,
context: ctx,
redisPool: redisPool,
stopChan: make(chan struct{}, 1),
doneChan: make(chan struct{}, 1),
processChan: make(chan *queueItem, processBufferSize),
hookStore: NewHookStore(),
isRunning: isRunning,
opCommands: newOPCommands(ctx, namespace, redisPool),
}
}
// Start is the implementation of the same method in the JobStatsManager interface.
func (rjs *RedisJobStatsManager) Start() {
if rjs.isRunning.Load().(bool) {
return
}
go rjs.loop()
rjs.opCommands.Start()
rjs.isRunning.Store(true)
logger.Info("Redis job stats manager is started")
}
// Shutdown is the implementation of the same method in the JobStatsManager interface.
func (rjs *RedisJobStatsManager) Shutdown() {
defer func() {
rjs.isRunning.Store(false)
}()
if !(rjs.isRunning.Load().(bool)) {
return
}
rjs.opCommands.Stop()
rjs.stopChan <- struct{}{}
<-rjs.doneChan
}
// Save is the implementation of the same method in the JobStatsManager interface.
// Async method
func (rjs *RedisJobStatsManager) Save(jobStats models.JobStats) {
item := &queueItem{
Op: opSaveStats,
Data: jobStats,
}
rjs.processChan <- item
}
// Retrieve is the implementation of the same method in the JobStatsManager interface.
// Sync method
func (rjs *RedisJobStatsManager) Retrieve(jobID string) (models.JobStats, error) {
if utils.IsEmptyStr(jobID) {
return models.JobStats{}, errors.New("empty job ID")
}
res, err := rjs.getJobStats(jobID)
if err != nil {
return models.JobStats{}, err
}
if res.Stats.IsMultipleExecutions {
executions, err := rjs.GetExecutions(jobID)
if err != nil {
return models.JobStats{}, err
}
res.Stats.Executions = executions
}
return res, nil
}
// SetJobStatus is the implementation of the same method in the JobStatsManager interface.
// Async method
func (rjs *RedisJobStatsManager) SetJobStatus(jobID string, status string) {
if utils.IsEmptyStr(jobID) || utils.IsEmptyStr(status) {
return
}
item := &queueItem{
Op: opUpdateStatus,
Data: []string{jobID, status},
}
rjs.processChan <- item
// Report status at the same time
rjs.submitStatusReportingItem(jobID, status, "")
}
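// loop consumes queued items from the process channel and dispatches them, retrying failed operations with backoff up to maxFails times, until stopped or the context is cancelled.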
func (rjs *RedisJobStatsManager) loop() {
controlChan := make(chan struct{})
defer func() {
rjs.isRunning.Store(false)
// Notify other sub goroutines
close(controlChan)
logger.Info("Redis job stats manager is stopped")
}()
for {
select {
case item := <-rjs.processChan:
go func(item *queueItem) {
clearHookCache := false
if err := rjs.process(item); err != nil {
item.Fails++
if item.Fails < maxFails {
logger.Warningf("Failed to process '%s' request with error: %s\n", item.Op, err)
// Retry after a random interval
go func() {
timer := time.NewTimer(time.Duration(backoff(item.Fails)) * time.Second)
defer timer.Stop()
select {
case <-timer.C:
rjs.processChan <- item
return
case <-controlChan:
}
}()
} else {
logger.Errorf("Failed to process '%s' request with error: %s (%d times tried)\n", item.Op, err, maxFails)
if item.Op == opReportStatus {
clearHookCache = true
}
}
} else {
logger.Debugf("Operation is successfully processed: %s", item.string())
if item.Op == opReportStatus {
clearHookCache = true
}
}
if clearHookCache {
// Clear cache to save memory if job status is success or stopped.
data := item.Data.([]string)
status := data[2]
if status == job.JobStatusSuccess || status == job.JobStatusStopped {
rjs.hookStore.Remove(data[0])
}
}
}(item)
case <-rjs.stopChan:
rjs.doneChan <- struct{}{}
return
case <-rjs.context.Done():
return
}
}
}
// SendCommand for the specified job
func (rjs *RedisJobStatsManager) SendCommand(jobID string, command string, isCached bool) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")
}
if command != CtlCommandStop && command != CtlCommandCancel {
return errors.New("unknown command")
}
if !isCached {
// Let other interested parties be aware of the command
if err := rjs.opCommands.Fire(jobID, command); err != nil {
return err
}
}
// Directly add to op commands maintaining list
return rjs.opCommands.Push(jobID, command)
}
// CheckIn message
func (rjs *RedisJobStatsManager) CheckIn(jobID string, message string) {
if utils.IsEmptyStr(jobID) || utils.IsEmptyStr(message) {
return
}
item := &queueItem{
Op: opCheckIn,
Data: []string{jobID, message},
}
rjs.processChan <- item
// Report checkin message at the same time
rjs.submitStatusReportingItem(jobID, job.JobStatusRunning, message)
}
// CtlCommand checks if control command is fired for the specified job.
func (rjs *RedisJobStatsManager) CtlCommand(jobID string) (string, error) {
if utils.IsEmptyStr(jobID) {
return "", errors.New("empty job ID")
}
c, ok := rjs.opCommands.Pop(jobID)
if !ok {
return "", fmt.Errorf("no OP command fired to job %s", jobID)
}
return c, nil
}
// DieAt marks the failed jobs with the time they were put into the dead queue.
func (rjs *RedisJobStatsManager) DieAt(jobID string, dieAt int64) {
if utils.IsEmptyStr(jobID) || dieAt == 0 {
return
}
item := &queueItem{
Op: opDieAt,
Data: []interface{}{jobID, dieAt},
}
rjs.processChan <- item
}
// RegisterHook is used to save the hook url or cache the url in memory.
func (rjs *RedisJobStatsManager) RegisterHook(jobID string, hookURL string, isCached bool) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")
}
if !utils.IsValidURL(hookURL) {
return errors.New("invalid hook url")
}
if !isCached {
return rjs.saveHook(jobID, hookURL)
}
rjs.hookStore.Add(jobID, hookURL)
return nil
}
// GetHook returns the status web hook URL for the specified job if it exists
func (rjs *RedisJobStatsManager) GetHook(jobID string) (string, error) {
if utils.IsEmptyStr(jobID) {
return "", errors.New("empty job ID")
}
// First retrieve from the cache
if hookURL, ok := rjs.hookStore.Get(jobID); ok {
return hookURL, nil
}
// Not hit in cache! Get it from the backend.
hookURL, err := rjs.getHook(jobID)
if err != nil {
return "", err
}
// Cache and return
rjs.hookStore.Add(jobID, hookURL)
return hookURL, nil
}
// ExpirePeriodicJobStats marks the periodic job stats expired
func (rjs *RedisJobStatsManager) ExpirePeriodicJobStats(jobID string) error {
conn := rjs.redisPool.Get()
defer conn.Close()
// The periodic job (policy) is stopped/unscheduled, so
// the stats of the periodic job can now be expired
key := utils.KeyJobStats(rjs.namespace, jobID)
_, err := conn.Do("EXPIRE", key, jobStatsDataExpireTime)
return err
}
// AttachExecution persists the links between the upstream job and the related executions (jobs).
func (rjs *RedisJobStatsManager) AttachExecution(upstreamJobID string, executions ...string) error {
if len(upstreamJobID) == 0 {
return errors.New("empty upstream job ID is not allowed")
}
if len(executions) == 0 {
return errors.New("no executions existing to persist")
}
// Send to process channel
item := &queueItem{
Op: opPersistExecutions,
Data: []interface{}{upstreamJobID, executions},
}
rjs.processChan <- item
return nil
}
// GetExecutions returns the existing executions (IDs) for the specified job.
func (rjs *RedisJobStatsManager) GetExecutions(upstreamJobID string, ranges ...Range) ([]string, error) {
if len(upstreamJobID) == 0 {
return nil, errors.New("no upstream ID specified")
}
conn := rjs.redisPool.Get()
defer conn.Close()
var start, end interface{} = "-inf", "+inf"
if len(ranges) >= 1 {
start = int(ranges[0])
}
if len(ranges) > 1 {
end = int(ranges[1])
}
key := utils.KeyUpstreamJobAndExecutions(rjs.namespace, upstreamJobID)
ids, err := redis.Strings(conn.Do("ZRANGEBYSCORE", key, start, end))
if err != nil {
if err == redis.ErrNil {
return []string{}, nil
}
return nil, err
}
return ids, nil
}
// Update the properties of job stats
func (rjs *RedisJobStatsManager) Update(jobID string, fieldAndValues ...interface{}) error {
if len(jobID) == 0 {
return errors.New("no updating job")
}
if len(fieldAndValues) == 0 || len(fieldAndValues)%2 != 0 {
return errors.New("filed and its value should be pair")
}
data := []interface{}{}
data = append(data, jobID)
data = append(data, fieldAndValues...)
item := &queueItem{
Op: opUpdateStats,
Data: data,
}
rjs.processChan <- item
return nil
}
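// submitStatusReportingItem resolves the status hook URL (cache first, then backend) and queues a status reporting item.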
func (rjs *RedisJobStatsManager) submitStatusReportingItem(jobID string, status, checkIn string) {
// Run in a separate goroutine to avoid blocking the caller
go func() {
var (
hookURL string
ok bool
err error
)
hookURL, ok = rjs.hookStore.Get(jobID)
if !ok {
// Retrieve from backend
hookURL, err = rjs.getHook(jobID)
if err != nil || !utils.IsValidURL(hookURL) {
// Log and exit
logger.Warningf("no status hook found for job %s, abandoning status reporting\n", jobID)
return
}
}
item := &queueItem{
Op: opReportStatus,
Data: []string{jobID, hookURL, status, checkIn},
}
rjs.processChan <- item
}()
}
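// reportStatus builds the status change payload, enriches it with the latest job stats and posts it to the hook URL.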
func (rjs *RedisJobStatsManager) reportStatus(jobID string, hookURL, status, checkIn string) error {
reportingStatus := models.JobStatusChange{
JobID: jobID,
Status: status,
CheckIn: checkIn,
}
// Return the whole metadata of the job.
// To support forward compatibility, keep the original fields `Status` and `CheckIn`.
// TODO: If querying job stats causes performance issues, a two-level cache should be enabled.
jobStats, err := rjs.getJobStats(jobID)
if err != nil {
// Just logged
logger.Errorf("Retrieving stats of job %s for hook reporting failed with error: %s", jobID, err)
} else {
// Override status/check in message
// Just double confirmation
jobStats.Stats.CheckIn = checkIn
jobStats.Stats.Status = status
reportingStatus.Metadata = jobStats.Stats
}
return DefaultHookClient.ReportStatus(hookURL, reportingStatus)
}
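// updateJobStats writes the given field/value pairs into the job stats hash and refreshes update_time.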
func (rjs *RedisJobStatsManager) updateJobStats(jobID string, fieldAndValues ...interface{}) error {
conn := rjs.redisPool.Get()
defer conn.Close()
key := utils.KeyJobStats(rjs.namespace, jobID)
args := make([]interface{}, 0, len(fieldAndValues)+1)
args = append(args, key)
args = append(args, fieldAndValues...)
args = append(args, "update_time", time.Now().Unix())
_, err := conn.Do("HMSET", args...)
return err
}
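// updateJobStatus updates the status field; a successful job also gets its die_at reset.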
func (rjs *RedisJobStatsManager) updateJobStatus(jobID string, status string) error {
args := make([]interface{}, 0, 4)
args = append(args, "status", status)
if status == job.JobStatusSuccess {
// make sure the 'die_at' is reset in case it's a retrying job
args = append(args, "die_at", 0)
}
return rjs.updateJobStats(jobID, args...)
}
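// checkIn records the check-in message and timestamp in the job stats hash.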
func (rjs *RedisJobStatsManager) checkIn(jobID string, message string) error {
now := time.Now().Unix()
args := make([]interface{}, 0, 4)
args = append(args, "check_in", message, "check_in_at", now)
return rjs.updateJobStats(jobID, args...)
}
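// dieAt scans the dead set within [baseTime, baseTime+5] for the job and records the matched score as die_at.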
func (rjs *RedisJobStatsManager) dieAt(jobID string, baseTime int64) error {
conn := rjs.redisPool.Get()
defer conn.Close()
// Query the dead job in the time scope of [baseTime,baseTime+5]
key := utils.RedisKeyDead(rjs.namespace)
jobWithScores, err := utils.GetZsetByScore(rjs.redisPool, key, []int64{baseTime, baseTime + 5})
if err != nil {
return err
}
for _, jws := range jobWithScores {
if j, err := utils.DeSerializeJob(jws.JobBytes); err == nil {
if j.ID == jobID {
// Found
args := make([]interface{}, 0, 6)
args = append(args, "die_at", jws.Score)
return rjs.updateJobStats(jobID, args...)
}
}
}
return fmt.Errorf("seems %s is not a dead job", jobID)
}
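// getJobStats loads the stats hash of the given job and maps the fields back onto a JobStats model.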
func (rjs *RedisJobStatsManager) getJobStats(jobID string) (models.JobStats, error) {
conn := rjs.redisPool.Get()
defer conn.Close()
key := utils.KeyJobStats(rjs.namespace, jobID)
vals, err := redis.Strings(conn.Do("HGETALL", key))
if err != nil {
return models.JobStats{}, err
}
if vals == nil || len(vals) == 0 {
return models.JobStats{}, errs.NoObjectFoundError(fmt.Sprintf("job '%s'", jobID))
}
res := models.JobStats{
Stats: &models.JobStatData{},
}
for i, l := 0, len(vals); i < l; i = i + 2 {
prop := vals[i]
value := vals[i+1]
switch prop {
case "id":
res.Stats.JobID = value
case "name":
res.Stats.JobName = value
case "kind":
res.Stats.JobKind = value
case "unique":
v, err := strconv.ParseBool(value)
if err != nil {
v = false
}
res.Stats.IsUnique = v
case "status":
res.Stats.Status = value
case "ref_link":
res.Stats.RefLink = value
case "enqueue_time":
v, _ := strconv.ParseInt(value, 10, 64)
res.Stats.EnqueueTime = v
case "update_time":
v, _ := strconv.ParseInt(value, 10, 64)
res.Stats.UpdateTime = v
case "run_at":
v, _ := strconv.ParseInt(value, 10, 64)
res.Stats.RunAt = v
case "check_in_at":
v, _ := strconv.ParseInt(value, 10, 64)
res.Stats.CheckInAt = v
case "check_in":
res.Stats.CheckIn = value
case "cron_spec":
res.Stats.CronSpec = value
case "die_at":
v, _ := strconv.ParseInt(value, 10, 64)
res.Stats.DieAt = v
case "upstream_job_id":
res.Stats.UpstreamJobID = value
case "multiple_executions":
v, err := strconv.ParseBool(value)
if err != nil {
v = false
}
res.Stats.IsMultipleExecutions = v
}
}
return res, nil
}
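// saveJobStats persists the stats hash; non-periodic jobs additionally get an expire time so stale keys are cleaned up.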
func (rjs *RedisJobStatsManager) saveJobStats(jobStats models.JobStats) error {
if jobStats.Stats == nil {
return errors.New("malformed job stats object")
}
conn := rjs.redisPool.Get()
defer conn.Close()
key := utils.KeyJobStats(rjs.namespace, jobStats.Stats.JobID)
args := make([]interface{}, 0)
args = append(args, key)
args = append(args,
"id", jobStats.Stats.JobID,
"name", jobStats.Stats.JobName,
"kind", jobStats.Stats.JobKind,
"unique", jobStats.Stats.IsUnique,
"status", jobStats.Stats.Status,
"ref_link", jobStats.Stats.RefLink,
"enqueue_time", jobStats.Stats.EnqueueTime,
"update_time", jobStats.Stats.UpdateTime,
"run_at", jobStats.Stats.RunAt,
"cron_spec", jobStats.Stats.CronSpec,
"multiple_executions", jobStats.Stats.IsMultipleExecutions,
)
if jobStats.Stats.CheckInAt > 0 && !utils.IsEmptyStr(jobStats.Stats.CheckIn) {
args = append(args,
"check_in", jobStats.Stats.CheckIn,
"check_in_at", jobStats.Stats.CheckInAt,
)
}
if jobStats.Stats.DieAt > 0 {
args = append(args, "die_at", jobStats.Stats.DieAt)
}
if len(jobStats.Stats.UpstreamJobID) > 0 {
args = append(args, "upstream_job_id", jobStats.Stats.UpstreamJobID)
}
conn.Send("HMSET", args...)
// If the job kind is periodic, no expire time is set
// If the job kind is scheduled, the expire time is extended so the key outlives runAt by the default window
if jobStats.Stats.JobKind != job.JobKindPeriodic {
var expireTime int64 = jobStatsDataExpireTime
if jobStats.Stats.JobKind == job.JobKindScheduled {
nowTime := time.Now().Unix()
future := jobStats.Stats.RunAt - nowTime
if future > 0 {
expireTime += future
}
}
expireTime += rand.Int63n(30) // Avoid lots of keys being expired at the same time
conn.Send("EXPIRE", key, expireTime)
}
return conn.Flush()
}
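// saveExecutions links the execution IDs to the upstream job via a sorted set, written inside a MULTI/EXEC transaction.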
func (rjs *RedisJobStatsManager) saveExecutions(upstreamJobID string, executions []string) error {
key := utils.KeyUpstreamJobAndExecutions(rjs.namespace, upstreamJobID)
conn := rjs.redisPool.Get()
defer conn.Close()
err := conn.Send("MULTI")
if err != nil {
return err
}
args := []interface{}{key}
baseScore := time.Now().Unix()
for index, execution := range executions {
args = append(args, baseScore+int64(index), execution)
}
if err := conn.Send("ZADD", args...); err != nil {
return err
}
// add expire time
if err := conn.Send("EXPIRE", key, jobStatsDataExpireTime); err != nil {
return err
}
_, err = conn.Do("EXEC")
return err
}
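// process dispatches a queued item to the matching handler according to its operation type.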
func (rjs *RedisJobStatsManager) process(item *queueItem) error {
switch item.Op {
case opSaveStats:
jobStats := item.Data.(models.JobStats)
return rjs.saveJobStats(jobStats)
case opUpdateStatus:
data := item.Data.([]string)
return rjs.updateJobStatus(data[0], data[1])
case opCheckIn:
data := item.Data.([]string)
return rjs.checkIn(data[0], data[1])
case opDieAt:
data := item.Data.([]interface{})
return rjs.dieAt(data[0].(string), data[1].(int64))
case opReportStatus:
data := item.Data.([]string)
return rjs.reportStatus(data[0], data[1], data[2], data[3])
case opPersistExecutions:
data := item.Data.([]interface{})
return rjs.saveExecutions(data[0].(string), data[1].([]string))
case opUpdateStats:
data := item.Data.([]interface{})
return rjs.updateJobStats(data[0].(string), data[1:]...)
default:
break
}
return nil
}
// HookData keeps the hook url info
type HookData struct {
JobID string `json:"job_id"`
HookURL string `json:"hook_url"`
}
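// saveHook stores the hook URL in the job stats hash and publishes a registration event on the notification channel.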
func (rjs *RedisJobStatsManager) saveHook(jobID string, hookURL string) error {
conn := rjs.redisPool.Get()
defer conn.Close()
key := utils.KeyJobStats(rjs.namespace, jobID)
args := make([]interface{}, 0, 3)
args = append(args, key, "status_hook", hookURL)
msg := &models.Message{
Event: EventRegisterStatusHook,
Data: &HookData{
JobID: jobID,
HookURL: hookURL,
},
}
rawJSON, err := json.Marshal(msg)
if err != nil {
return err
}
// The hook is saved into the job stats hash.
// No expire time is set here; it is set when the job stats are saved.
if err := conn.Send("MULTI"); err != nil {
return err
}
if err := conn.Send("HMSET", args...); err != nil {
return err
}
if err := conn.Send("PUBLISH", utils.KeyPeriodicNotification(rjs.namespace), rawJSON); err != nil {
return err
}
_, err = conn.Do("EXEC")
return err
}
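// getHook reads the status_hook field from the job stats hash.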
func (rjs *RedisJobStatsManager) getHook(jobID string) (string, error) {
conn := rjs.redisPool.Get()
defer conn.Close()
key := utils.KeyJobStats(rjs.namespace, jobID)
hookURL, err := redis.String(conn.Do("HGET", key, "status_hook"))
if err != nil {
if err == redis.ErrNil {
return "", fmt.Errorf("no registered web hook found for job '%s'", jobID)
}
return "", err
}
return hookURL, nil
}
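// backoff returns a retry delay in seconds that grows with the number of failures, plus up to 4 seconds of random jitter.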
func backoff(seed uint) int {
if seed < 1 {
seed = 1
}
return int(math.Pow(float64(seed+1), float64(seed))) + rand.Intn(5)
}

Some files were not shown because too many files have changed in this diff